@report {1498273, title = {Comments on the City of Seattle Open Data Risk Assessment}, year = {2017}, abstract = {The transparency goals of the open data movement serve important social, economic, and democratic functions in cities like Seattle. At the same time, some municipal datasets about the city and its citizens{\textquoteright} activities carry inherent risks to individual privacy when shared publicly. In 2016, the City of Seattle declared in its Open Data Policy that the city{\textquoteright}s data would be {\textquotedblleft}open by preference,{\textquotedblright} except when doing so may affect individual privacy. To ensure its Open Data program effectively protects individuals, Seattle committed to performing an annual risk assessment and tasked the Future of Privacy Forum (FPF) with creating and deploying an initial privacy risk assessment methodology for open data. This Draft Report provides tools and guidance to the City of Seattle and other municipalities navigating the complex policy, operational, technical, organizational, and ethical standards that support privacy-protective open data programs. Although there is a growing body of research on open data privacy, open data managers and departmental data owners need to be able to employ a standardized methodology for assessing the privacy risks and benefits of particular datasets internally, without a bevy of expert statisticians, privacy lawyers, or philosophers. By following a flexible, risk-based assessment process, the City of Seattle {\textendash} and other municipal open data programs {\textendash} can maximize the utility and openness of civic data while minimizing privacy risks to individuals and community concerns about ethical challenges, fairness, and equity. This Draft Report first describes inherent privacy risks in an open data landscape, with an emphasis on potential harms related to re-identification, data quality, and fairness. Accompanying this, the Draft Report includes a Model Open Data Benefit Risk Analysis (MODBRA). The model template evaluates the types of data contained in a proposed open dataset, the potential benefits {\textendash} and concomitant risks {\textendash} of releasing the dataset publicly, and strategies for effective de-identification and risk mitigation. This holistic assessment guides city officials to determine whether to release the dataset openly, in a limited-access environment, or to withhold it from publication (absent countervailing public policy considerations). The Draft Report methodology builds on extensive work done in this field by experts at the National Institute of Standards and Technology, the University of Washington, the Berkman Klein Center for Internet \& Society at Harvard University, and others, and adapts existing frameworks to the unique challenges faced by cities as local governments, technological system integrators, and consumer-facing service providers.}, url = {https://fpf.org/wp-content/uploads/2018/01/Wood-Altman-Baleato-Vadhan_Comments-on-FPF-Seattle-Open-Data-Draft-Report.pdf}, author = {Alexandra Wood and Micah Altman and Suso Baleato and Salil Vadhan} } @article {962646, title = {Relational Cost Analysis}, journal = {Symposium on the Principles of Programming Languages, ACM}, year = {2017}, month = {Jan 2017}, abstract = {Establishing quantitative bounds on the execution cost of programs is essential in many areas of computer science such as complexity analysis, compiler optimizations, security and privacy.
Techniques based on program analysis, type systems and abstract interpretation are well-studied, but methods for analyzing how the execution costs of two programs compare to each other have not received attention. Naively combining the worst- and best-case execution costs of the two programs does not work well in many cases because such analysis forgets the similarities between the programs or the inputs. In this work, we propose a relational cost analysis technique that is capable of establishing precise bounds on the difference in the execution cost of two programs by making use of relational properties of programs and inputs. We develop RelCost, a refinement type and effect system for a higher-order functional language with recursion and subtyping. The key novelty of our technique is the combination of relational refinements with two modes of typing{\textemdash}relational typing for reasoning about similar computations/inputs and unary typing for reasoning about unrelated computations/inputs. This combination allows us to analyze the execution cost difference of two programs more precisely than a naive non-relational approach. We prove our type system sound using a semantic model based on step-indexed unary and binary logical relations accounting for non-relational and relational reasoning principles with their respective costs. We demonstrate the precision and generality of our technique through examples.}, author = {Ezgi Cicek and Gilles Barthe and Marco Gaboardi and Deepak Garg and Jan Hoffmann} } @article {962666, title = {A Semantic Account of Metric Preservation}, journal = {Symposium on the Principles of Programming Languages, ACM}, year = {2017}, month = {Jan 2017}, abstract = {Program sensitivity measures how robust a program is to small changes in its input, and is a fundamental notion in domains ranging from differential privacy to cyber-physical systems. A natural way to formalize program sensitivity is in terms of metrics on the input and output spaces, requiring that an\ r-sensitive function map inputs that are at distance\ d\ to outputs that are at distance at most\ r·d. Program sensitivity is thus an analogue of Lipschitz continuity for programs. Reed and Pierce introduced Fuzz, a functional language with a linear type system that can express program sensitivity. They show soundness operationally, in the form of a metric preservation property. Inspired by their work, we study program sensitivity and metric preservation from a denotational point of view. In particular, we introduce metric CPOs, a novel semantic structure for reasoning about computation on metric spaces, by endowing CPOs with a compatible notion of distance. This structure is useful for reasoning about metric properties of programs, and specifically about program sensitivity. We demonstrate metric CPOs by giving a model for the deterministic fragment of Fuzz.}, url = {https://arxiv.org/abs/1702.00374}, author = {Arthur Azevedo de Amorim and Marco Gaboardi and Justin Hsu and Shin-ya Katsumata and Ikram Cherigui} } @article {1200641, title = {Between Pure and Approximate Differential Privacy}, journal = {Journal of Privacy and Confidentiality}, year = {2017}, abstract = {We show a new lower bound on the sample complexity of (ε, δ)-differentially private algorithms that accurately answer statistical queries on high-dimensional databases.
The novelty of our bound is that it depends optimally on the parameter δ, which loosely corresponds to the probability that the algorithm fails to be private, and is the first to smoothly interpolate between approximate differential privacy (δ > 0) and pure differential privacy (δ = 0).}, author = {Thomas Steinke and Jonathan Ullman} } @inbook {1200421, title = {The Complexity of Differential Privacy}, booktitle = {Tutorials on the Foundations of Cryptography}, year = {2017}, pages = {347-450}, publisher = {Springer, Yehuda Lindell, ed.}, organization = {Springer, Yehuda Lindell, ed.}, abstract = {Version History: August 2016: Manuscript v1 (see files attached). March 2017: Manuscript v2 (see files attached); Errata. April 2017: Published Version (in Tutorials on the Foundations of Cryptography; see above). Differential privacy is a theoretical framework for ensuring the privacy of individual-level data when performing statistical analysis of privacy-sensitive datasets. This tutorial provides an introduction to and overview of differential privacy, with the goal of conveying its deep connections to a variety of other topics in computational complexity, cryptography, and theoretical computer science at large. This tutorial is written in celebration of Oded Goldreich{\textquoteright}s 60th birthday, starting from notes taken during a minicourse given by the author and Kunal Talwar at the 26th McGill Invitational Workshop on Computational Complexity [1].}, url = {https://link.springer.com/chapter/10.1007/978-3-319-57048-8_7}, author = {Salil Vadhan} } @article {1200661, title = {The DataTags System: Sharing Sensitive Data with Confidence}, journal = {Research Data Alliance (RDA) 8th Plenary on Privacy Implications of Research Data Sets, during International Data Week 2016}, year = {2017}, author = {Merce Crosas} } @conference {1200491, title = {Decoupled Classifiers for Fair and Efficient Machine Learning}, booktitle = {Fairness, Accountability, and Transparency in Machine Learning (FATML)}, year = {2017}, abstract = {When it is ethical and legal to use a sensitive attribute (such as gender or race) in machine learning systems, the question remains how to do so. We show that the naive application of machine learning algorithms using sensitive features leads to an inherent tradeoff in accuracy between groups. We provide a simple and efficient decoupling technique that can be added on top of any black-box machine learning algorithm, to learn different classifiers for different groups. Transfer learning is used to mitigate the problem of having too little data on any one group. The method can apply to a range of fairness criteria. In particular, we require the application designer to specify a joint loss function that makes explicit the trade-off between fairness and accuracy. Our reduction is shown to efficiently find the minimum loss as long as the objective has a certain natural monotonicity property, which may be of independent interest in the study of fairness in algorithms.}, author = {Cynthia Dwork and Nicole Immorlica and Adam Kalai and Max Leiserson} } @article {1200616, title = {Differentially Private Submodular Maximization: Data Summarization in Disguise}, journal = {Proceedings of the 34th International Conference on Machine Learning (ICML 2017)}, year = {2017}, abstract = {Many data summarization applications are captured by the general framework of submodular maximization. As a consequence, a wide range of efficient approximation algorithms have been developed.
However, when such applications involve sensitive data about individuals, their privacy concerns are not automatically addressed. To remedy this problem, we propose a general and systematic study of differentially private submodular maximization. We present privacy-preserving algorithms for both monotone and non-monotone submodular maximization under cardinality, matroid, and p-extendible system constraints, with guarantees that are competitive with optimal. Along the way, we analyze a new algorithm for non-monotone submodular maximization, which is the first (even non-privately) to achieve a constant approximation ratio while running in linear time. We additionally provide two concrete experiments to validate the efficacy of these algorithms.}, author = {Marko Mitrovic and Mark Bun and Andreas Krause and Amin Karbasi} } @article {1190751, title = {Exposed! A Survey of Attacks on Private Data}, journal = {Annual Review of Statistics and Its Application (2017)}, year = {2017}, abstract = {Privacy-preserving statistical data analysis addresses the general question of protecting privacy when publicly releasing information about a sensitive dataset. A privacy attack takes seemingly innocuous released information and uses it to discern the private details of individuals, thus demonstrating that such information compromises privacy. For example, re-identification attacks have shown that it is easy to link supposedly de-identified records to the identity of the individual concerned. This survey focuses on attacking aggregate data, such as statistics about how many individuals have a certain disease, genetic trait, or combination thereof. We consider two types of attacks: reconstruction attacks, which approximately determine a sensitive feature of all the individuals covered by the dataset, and tracing attacks, which determine whether or not a target individual{\textquoteright}s data are included in the dataset. We also discuss techniques from the differential privacy literature for releasing approximate aggregate statistics while provably thwarting any privacy attack.}, author = {Cynthia Dwork and Smith, Adam and Thomas Steinke and Jonathan Ullman} } @article {1200511, title = {Innovations in Federal Statistics: Combining Data Sources While Protecting Privacy}, journal = {National Academies of Sciences, Engineering, and Medicine paper}, year = {2017}, abstract = {Federal government statistics provide critical information to the country and serve a key role in a democracy. For decades, sample surveys with instruments carefully designed for particular data needs have been one of the primary methods for collecting data for federal statistics.
However, the costs of conducting such surveys have been increasing while response rates have been declining, and many surveys are not able to fulfill growing demands for more timely information and for more detailed information at state and local levels.}, author = {Robert M Groves and Michael E Chernew and Piet Daas and Cynthia Dwork and Ophir Frieder and Hosagrahar V Jagadish and Frauke Kreuter and Sharon Lohr and James P Lynch and Colm O{\textquoteright}Muircheartaigh and Trivellore Raghunathan and Rigobon, Roberto and Marc Rotenberg} } @article {1021631, title = {Make Up Your Mind: The Price of Online Queries in Differential Privacy}, journal = {Proceedings of the Twenty-Eighth Annual ACM-SIAM Symposium on Discrete Algorithms (SODA)}, year = {2017}, abstract = {We consider the problem of answering queries about a sensitive dataset subject to differential privacy. The queries may be chosen adversarially from a larger set Q of allowable queries in one of three ways, which we list in order from easiest to hardest to answer: {\textbullet} Offline: The queries are chosen all at once and the differentially private mechanism answers the queries in a single batch. {\textbullet} Online: The queries are chosen all at once, but the mechanism only receives the queries in a streaming fashion and must answer each query before seeing the next query. {\textbullet} Adaptive: The queries are chosen one at a time and the mechanism must answer each query before the next query is chosen. In particular, each query may depend on the answers given to previous queries. Many differentially private mechanisms are just as efficient in the adaptive model as they are in the offline model. Meanwhile, most lower bounds for differential privacy hold in the offline setting. This suggests that the three models may be equivalent. We prove that these models are all, in fact, distinct. Specifically, we show that there is a family of statistical queries such that exponentially more queries from this family can be answered in the offline model than in the online model. We also exhibit a family of search queries such that exponentially more queries from this family can be answered in the online model than in the adaptive model. We also investigate whether such separations might hold for simple queries like threshold queries over the real line.}, url = {https://arxiv.org/abs/1604.04618}, author = {Mark Bun and Thomas Steinke and Jonathan Ullman} } @article {1259431, title = {The Price of Selection in Differential Privacy}, journal = {Proceedings of the 30th Conference on Learning Theory (COLT 2017)}, year = {2017}, url = {https://arxiv.org/abs/1702.02970}, author = {Mitali Bafna and Jonathan Ullman} } @article {1027956, title = {Private Incremental Regression}, journal = {ACM SIGMOD/PODS Conference (PODS 2017)}, year = {2017}, author = {Kasiviswanathan, Shiva and Kobbi Nissim and Hongxia Jin} } @conference {1200651, title = {Public Policy Modeling using the DataTags Toolset}, booktitle = {European Social Policy Analysis Network (ESPAnet), Israel}, year = {2017}, abstract = {We apply Tags, a framework for modeling data handling policies, to a welfare policy.
The generated model is useful for assessing entitlements of specific cases, and for gaining insights into the modeled policy as a whole.}, author = {Bar-Sinai, Michael and Rotem Medzini} } @article {1200596, title = {Re-identification Risks in HIPAA Safe Harbor Data: A study of data from one environmental health study}, journal = {Technology Science}, year = {2017}, author = {Sweeney, Latanya and Ji Su Yoo and Laura Perovich and Katherine E Boronow and Brown, Phil and Julia Green Brody} } @article {1200586, title = {Securing Dataverse with an Adapted Command Design Pattern}, journal = {IEEE Secure Development Conference (SecDev 2017)}, year = {2017}, author = {Gustavo Durand and Bar-Sinai, Michael and Merce Crosas} } @article {1259396, title = {Tight Lower Bounds for Differentially Private Selection}, journal = {58th Annual Symposium on Foundations of Computer Science (FOCS 2017)}, year = {2017}, url = {https://arxiv.org/abs/1704.03024}, author = {Thomas Steinke and Jonathan Ullman} } @article {1200631, title = {Voter Identity Theft: Submitting Changes to Voter Registrations Online to Disrupt Elections}, journal = {Technology Science}, year = {2017}, author = {Sweeney, Latanya and Ji Su Yoo and Jinyan Zang} } @article {1200671, title = {What{\textquoteright}s Fair?}, journal = {23rd SIGKDD Conference on Knowledge Discovery and Data Mining (KDD 2017)}, year = {2017}, author = {Cynthia Dwork} }