@inproceedings{icml2014c2_steinhardtb14,
  author    = {Steinhardt, Jacob and Liang, Percy},
  title     = {Adaptivity and Optimism: An Improved Exponentiated Gradient Algorithm},
  booktitle = {Proceedings of the 31st International Conference on Machine Learning ({ICML-14})},
  editor    = {Jebara, Tony and Xing, Eric P.},
  publisher = {JMLR Workshop and Conference Proceedings},
  year      = {2014},
  pages     = {1593--1601},
  url       = {http://jmlr.org/proceedings/papers/v32/steinhardtb14.pdf},
  abstract  = {We present an adaptive variant of the exponentiated gradient algorithm. Leveraging the optimistic learning framework of Rakhlin \& Sridharan (2012), we obtain regret bounds that in the learning from experts setting depend on the variance and path length of the best expert, improving on results by Hazan \& Kale (2008) and Chiang et al. (2012), and resolving an open problem posed by Kale (2012). Our techniques naturally extend to matrix-valued loss functions, where we present an adaptive matrix exponentiated gradient algorithm. To obtain the optimal regret bound in the matrix case, we generalize the Follow-the-Regularized-Leader algorithm to vector-valued payoffs, which may be of independent interest.},
}