% Search Machine Learning Repository
% NOTE(review): the line above was scraped prose glued to the entry opener;
% kept here as ignored text outside the entry so BibTeX parsing is unaffected.
@inproceedings{icml2014c2_brunskill14,
    author    = {Brunskill, Emma and Li, Lihong},
    title     = {{PAC}-inspired Option Discovery in Lifelong Reinforcement Learning},
    booktitle = {Proceedings of the 31st International Conference on Machine Learning ({ICML}-14)},
    editor    = {Jebara, Tony and Xing, Eric P.},
    publisher = {JMLR Workshop and Conference Proceedings},
    volume    = {32},
    pages     = {316--324},
    year      = {2014},
    url       = {http://jmlr.org/proceedings/papers/v32/brunskill14.pdf},
    abstract  = {A key goal of AI is to create lifelong learning agents that can leverage prior experience to improve performance on later tasks. In reinforcement-learning problems, one way to summarize prior experience for future use is through options, which are temporally extended actions (subpolicies) for how to behave. Options can then be used to potentially accelerate learning in new reinforcement learning tasks. In this work, we provide the first formal analysis of the sample complexity, a measure of learning speed, of reinforcement learning with options. This analysis helps shed light on some interesting prior empirical results on when and how options may accelerate learning. We then quantify the benefit of options in reducing sample complexity of a lifelong learning agent. Finally, the new theoretical insights inspire a novel option-discovery algorithm that aims at minimizing overall sample complexity in lifelong reinforcement learning.},
}