@inproceedings{1a73b3dc59d343b886d4961b064dbb02,
  author    = {Ye, Teng and Reinecke, Katharina and Robert, Lionel P.},
  title     = {Personalized Feedback versus Money: The Effect on Reliability of Subjective Data in Online Experimental Platforms},
  abstract  = {We compared the data reliability on a subjective task from two platforms: Amazon's Mechanical Turk (MTurk) and LabintheWild. MTurk incentivizes participants with financial compensation while LabintheWild provides participants with personalized feedback. LabintheWild was found to produce higher data reliability than MTurk. Our findings suggested that online experiment platforms providing feedback in exchange for study participation could produce more reliable data in subjective preference tasks than those offering financial compensation.},
  keywords  = {Compensation, Crowdsourcing, Data Quality, Incentives, Mechanical Turk, Motivation, Online Experimentation},
  booktitle = {CSCW 2017 - Companion of the 2017 ACM Conference on Computer Supported Cooperative Work and Social Computing},
  publisher = {Association for Computing Machinery, Inc},
  pages     = {343--346},
  year      = {2017},
  month     = feb,
  day       = {25},
  doi       = {10.1145/3022198.3026339},
  language  = {English (US)},
  note      = {2017 ACM Conference on Computer Supported Cooperative Work and Social Computing, CSCW 2017 ; Conference date: 25-02-2017 Through 01-03-2017},
}