@inproceedings{guzman-EtAl:2015:WMT,
  author    = {Guzm{\'a}n, Francisco and Abdelali, Ahmed and Temnikova, Irina and Sajjad, Hassan and Vogel, Stephan},
  title     = {How do Humans Evaluate Machine Translation},
  booktitle = {Proceedings of the Tenth Workshop on Statistical Machine Translation},
  month     = sep,
  year      = {2015},
  address   = {Lisbon, Portugal},
  pages     = {457--466},
  publisher = {Association for Computational Linguistics},
  url       = {http://aclweb.org/anthology/W15-3059},
  abstract  = {In this paper, we take a closer look at the MT evaluation process from a glass-box perspective using eye-tracking. We analyze two aspects of the evaluation task: the background of evaluators (monolingual or bilingual) and the sources of information available, and we evaluate them using time and consistency as criteria. Our findings show that monolinguals are slower but more consistent than bilinguals, especially when only target language information is available. When exposed to various sources of information, evaluators in general take more time and in the case of monolinguals, there is a drop in consistency. Our findings suggest that to have consistent and cost effective MT evaluations, it is better to use monolinguals with only target language information.},
}