@inproceedings{icml2014c1_jain14,
    author    = {Jain, Prateek and Thakurta, Abhradeep G.},
    editor    = {Jebara, Tony and Xing, Eric P.},
    title     = {(Near) Dimension Independent Risk Bounds for Differentially Private Learning},
    booktitle = {Proceedings of the 31st International Conference on Machine Learning ({ICML-14})},
    series    = {JMLR Workshop and Conference Proceedings},
    volume    = {32},
    publisher = {JMLR Workshop and Conference Proceedings},
    pages     = {476--484},
    year      = {2014},
    url       = {http://jmlr.org/proceedings/papers/v32/jain14.pdf},
    abstract  = {In this paper, we study the problem of differentially private risk minimization where the goal is to provide differentially private algorithms that have small excess risk. In particular we address the following open problem: \emph{Is it possible to design computationally efficient differentially private risk minimizers with excess risk bounds that do not explicitly depend on dimensionality ($p$) and do not require structural assumptions like restricted strong convexity?} In this paper, we answer the question in the affirmative for a variant of the well-known \emph{output} and \emph{objective} perturbation algorithms [Chaudhuri et al., 2011]. In particular, we show that in generalized linear model, variants of both output and objective perturbation algorithms have no {\em explicit} dependence on $p$. Our results assume that the underlying loss function is a $1$-Lipschitz convex function and we show that the excess risk depends only on $L_2$ norm of the true risk minimizer and that of training points. Next, we present a novel privacy preserving algorithm for risk minimization over simplex in the generalized linear model, where the loss function is a doubly differentiable convex function. Assuming that the training points have bounded $L_\infty$-norm, our algorithm provides risk bound that has only {\em logarithmic} dependence on $p$. We also apply our technique to the online learning setting and obtain a regret bound with similar logarithmic dependence on $p$. In contrast, the existing differentially private online learning methods incur $O(\sqrt{p})$ dependence.},
}