Search Machine Learning Repository
@inproceedings{icml2014c2_liud14,
    author    = {Liu, Ji and Wright, Steve and R{\'e}, Christopher and Bittorf, Victor and Sridhar, Srikrishna},
    title     = {An Asynchronous Parallel Stochastic Coordinate Descent Algorithm},
    booktitle = {Proceedings of the 31st International Conference on Machine Learning ({ICML-14})},
    editor    = {Jebara, Tony and Xing, Eric P.},
    publisher = {JMLR Workshop and Conference Proceedings},
    year      = {2014},
    pages     = {469--477},
    url       = {http://jmlr.org/proceedings/papers/v32/liud14.pdf},
    abstract  = {We describe an asynchronous parallel stochastic coordinate descent algorithm for minimizing smooth unconstrained or separably constrained functions. The method achieves a linear convergence rate on functions that satisfy an essential strong convexity property and a sublinear rate ($1/K$) on general convex functions. Near-linear speedup on a multicore system can be expected if the number of processors is $O(n^{1/2})$ in unconstrained optimization and $O(n^{1/4})$ in the separable-constrained case, where $n$ is the number of variables. We describe results from implementation on 40-core processors.},
}