@inproceedings{nakov-guzman-vogel:2013:Short,
  author    = {Nakov, Preslav and Guzm{\'a}n, Francisco and Vogel, Stephan},
  title     = {A Tale about {PRO} and Monsters},
  booktitle = {Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics ({ACL'13})},
  address   = {Sofia, Bulgaria},
  month     = aug,
  year      = {2013},
  pages     = {12--17},
  url       = {http://www.aclweb.org/anthology/P13-2003},
  abstract  = {While experimenting with tuning on long sentences, we made an unexpected discovery: that PRO falls victim to monsters -overly long negative examples with very low BLEU+1 scores, which are unsuitable for learning and can cause testing BLEU to drop by several points absolute. We propose several effective ways to address the problem, using length- and BLEU+1- based cut-offs, outlier filters, stochastic sampling, and random acceptance. The best of these fixes not only slay and protect against monsters, but also yield higher stability for PRO as well as improved test-time BLEU scores. Thus, we recommend them to anybody using PRO, monster-believer or not.},
}