@inproceedings{DodisLoMiVa12,
  title     = {Differential Privacy with Imperfect Randomness},
  booktitle = {Proceedings of the 32nd International Cryptology Conference (CRYPTO {\textquoteright}12)},
  series    = {Lecture Notes in Computer Science},
  volume    = {7417},
  year      = {2012},
  month     = aug,
  pages     = {497--516},
  publisher = {Springer-Verlag},
  address   = {Santa Barbara, CA},
  abstract  = {In this work we revisit the question of basing cryptography on imperfect randomness. Bosley and Dodis (TCC{\textquoteright}07) showed that if a source of randomness R is {\textquotedblleft}good enough{\textquotedblright} to generate a secret key capable of encrypting k bits, then one can deterministically extract nearly k almost uniform bits from R, suggesting that traditional privacy notions (namely, indistinguishability of encryption) requires an {\textquotedblleft}extractable{\textquotedblright} source of randomness. Other, even stronger impossibility results are known for achieving privacy under specific {\textquotedblleft}non-extractable{\textquotedblright} sources of randomness, such as the {$\gamma$}-Santha-Vazirani (SV) source, where each next bit has fresh entropy, but is allowed to have a small bias {$\gamma < 1$} (possibly depending on prior bits). We ask whether similar negative results also hold for a more recent notion of privacy called differential privacy (Dwork et al., TCC{\textquoteright}06), concentrating, in particular, on achieving differential privacy with the Santha-Vazirani source. We show that the answer is no. Specifically, we give a differentially private mechanism for approximating arbitrary {\textquotedblleft}low sensitivity{\textquotedblright} functions that works even with randomness coming from a {$\gamma$}-Santha-Vazirani source, for any {$\gamma < 1$}. This provides a somewhat surprising {\textquotedblleft}separation{\textquotedblright} between traditional privacy and differential privacy with respect to imperfect randomness. Interestingly, the design of our mechanism is quite different from the traditional {\textquotedblleft}additive-noise{\textquotedblright} mechanisms (e.g., Laplace mechanism) successfully utilized to achieve differential privacy with perfect randomness.
Indeed, we show that any (accurate and private) {\textquotedblleft}SV-robust{\textquotedblright} mechanism for our problem requires a demanding property called consistent sampling, which is strictly stronger than differential privacy, and cannot be satisfied by any additive-noise mechanism.},
  doi       = {10.1007/978-3-642-32009-5_29},
  url       = {http://link.springer.com/chapter/10.1007\%2F978-3-642-32009-5_29},
  author    = {Dodis, Yevgeniy and L{\'o}pez-Alt, Adriana and Mironov, Ilya and Vadhan, Salil}
}
@inproceedings{ThalerUlVa12,
  title     = {Faster Algorithms for Privately Releasing Marginals},
  booktitle = {Automata, Languages, and Programming - 39th International Colloquium, ICALP 2012},
  series    = {Lecture Notes in Computer Science},
  volume    = {7391},
  year      = {2012},
  month     = jul,
  publisher = {Springer},
  address   = {Warwick, UK},
  abstract  = {We study the problem of releasing k-way marginals of a database {$D \in (\{0,1\}^d)^n$}, while preserving differential privacy. The answer to a k-way marginal query is the fraction of D{\textquoteright}s records {$x \in \{0,1\}^d$} with a given value in each of a given set of up to k columns. Marginal queries enable a rich class of statistical analyses of a dataset, and designing efficient algorithms for privately releasing marginal queries has been identified as an important open problem in private data analysis (cf. Barak et al., PODS {\textquoteright}07). We give an algorithm that runs in time {$d^{O(\sqrt{k})}$} and releases a private summary capable of answering any k-way marginal query with at most {\textpm}.01 error on every query as long as {$n \geq d^{O(\sqrt{k})}$}. To our knowledge, ours is the first algorithm capable of privately releasing marginal queries with non-trivial worst-case accuracy guarantees in time substantially smaller than the number of k-way marginal queries, which is {$d^{\Theta(k)}$} (for {$k \ll d$}).},
  doi       = {10.1007/978-3-642-31594-7_68},
  url       = {http://dx.doi.org/10.1007/978-3-642-31594-7_68},
  author    = {Thaler, Justin and Ullman, Jonathan and Vadhan, Salil P.}
}
@inproceedings{DBLP:conf/tcc/GuptaRU12,
  title     = {Iterative Constructions and Private Data Release},
  booktitle = {Theory of Cryptography - 9th Theory of Cryptography Conference, TCC 2012},
  series    = {Lecture Notes in Computer Science},
  volume    = {7194},
  year      = {2012},
  month     = mar,
  pages     = {339--356},
  publisher = {Springer},
  address   = {Taormina, Sicily, Italy},
  abstract  = {In this paper we study the problem of approximately releasing the cut function of a graph while preserving differential privacy, and give new algorithms (and new analyses of existing algorithms) in both the interactive and non-interactive settings. Our algorithms in the interactive setting are achieved by revisiting the problem of releasing differentially private, approximate answers to a large number of queries on a database. We show that several algorithms for this problem fall into the same basic framework, and are based on the existence of objects which we call iterative database construction algorithms. We give a new generic framework in which new (efficient) IDC algorithms give rise to new (efficient) interactive private query release mechanisms. Our modular analysis simplifies and tightens the analysis of previous algorithms, leading to improved bounds. We then give a new IDC algorithm (and therefore a new private, interactive query release mechanism) based on the Frieze/Kannan low-rank matrix decomposition. This new release mechanism gives an improvement on prior work in a range of parameters where the size of the database is comparable to the size of the data universe (such as releasing all cut queries on dense graphs). We also give a non-interactive algorithm for efficiently releasing private synthetic data for graph cuts with error {$O(|V|^{1.5})$}. Our algorithm is based on randomized response and a non-private implementation of the SDP-based, constant-factor approximation algorithm for cut-norm due to Alon and Naor. Finally, we give a reduction based on the IDC framework showing that an efficient, private algorithm for computing sufficiently accurate rank-1 matrix approximations would lead to an improved efficient algorithm for releasing private synthetic data for graph cuts. We leave finding such an algorithm as our main open problem.},
  doi       = {10.1007/978-3-642-28914-9_19},
  url       = {http://dx.doi.org/10.1007/978-3-642-28914-9_19},
  author    = {Gupta, Anupam and Roth, Aaron and Ullman, Jonathan}
}
@inproceedings{DworkNaVa12,
  title     = {The Privacy of the Analyst and the Power of the State},
  booktitle = {Proceedings of the 53rd Annual {IEEE} Symposium on Foundations of Computer Science (FOCS {\textquoteright}12)},
  year      = {2012},
  month     = oct,
  pages     = {400--409},
  publisher = {IEEE},
  address   = {New Brunswick, NJ},
  abstract  = {We initiate the study of "privacy for the analyst" in differentially private data analysis. That is, not only will we be concerned with ensuring differential privacy for the data (i.e. individuals or customers), which are the usual concern of differential privacy, but we also consider (differential) privacy for the set of queries posed by each data analyst. The goal is to achieve privacy with respect to other analysts, or users of the system. This problem arises only in the context of stateful privacy mechanisms, in which the responses to queries depend on other queries posed (a recent wave of results in the area utilized cleverly coordinated noise and state in order to allow answering privately hugely many queries). We argue that the problem is real by proving an exponential gap between the number of queries that can be answered (with non-trivial error) by stateless and stateful differentially private mechanisms. We then give a stateful algorithm for differentially private data analysis that also ensures differential privacy for the analyst and can answer exponentially many queries.},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6375318\&tag=1},
  author    = {Dwork, Cynthia and Naor, Moni and Vadhan, Salil}
}
@misc{KearnsPaRoUl12,
  title         = {Private Equilibrium Release, Large Games, and No-Regret Learning},
  year          = {2012},
  abstract      = {We give mechanisms in which each of n players in a game is given their component of an (approximate) equilibrium in a way that guarantees differential privacy---that is, the revelation of the equilibrium components does not reveal too much information about the utilities of the other players. More precisely, we show how to compute an approximate correlated equilibrium (CE) under the constraint of differential privacy (DP), provided n is large and any player{\textquoteright}s action affects any other{\textquoteright}s payoff by at most a small amount. Our results draw interesting connections between noisy generalizations of classical convergence results for no-regret learning, and the noisy mechanisms developed for differential privacy. Our results imply the ability to truthfully implement good social-welfare solutions in many games, such as games with small Price of Anarchy, even if the mechanism does not have the ability to enforce outcomes. We give two different mechanisms for DP computation of approximate CE. The first is computationally efficient, but has a suboptimal dependence on the number of actions in the game; the second is computationally efficient, but allows for games with exponentially many actions. We also give a matching lower bound, showing that our results are tight up to logarithmic factors.},
  eprint        = {1207.4084},
  archiveprefix = {arXiv},
  url           = {http://arxiv.org/abs/1207.4084},
  author        = {Kearns, Michael and Pai, Mallesh and Roth, Aaron and Ullman, Jonathan}
}