@article{Sun_2023,
  title        = {Regulating Algorithmic Disinformation},
  author       = {Sun, Haochen},
  journal      = {The Columbia Journal of Law \& the Arts},
  volume       = {46},
  number       = {4},
  pages        = {367--417},
  year         = {2023},
  month        = {May},
  url          = {https://journals.library.columbia.edu/index.php/lawandarts/article/view/11237},
  doi          = {10.52214/jla.v46i3.11237},
  abstractNote = {Disinformation is endemic in the digital age, seriously harming the public interest in democracy, health care, and national security. Increasingly, disinformation is created and disseminated by social media algorithms. Algorithmic disinformation, a new phenomenon, thus looms large in contemporary society. Recommendation algorithms are driving the spread of disinformation on social media networks, and generative algorithms are creating deepfakes, both at unprecedented levels. The regulation of algorithmic disinformation is therefore one of today’s thorniest legal problems. Against this backdrop, this Article proposes a novel approach to regulating algorithmic disinformation effectively. It first explores why transparency, intelligibility, and accountability should be adopted as the three major principles of the legal regulation of algorithmic disinformation. Because of its market-based technology development and regulation policy, the United States has yet to adopt any laws regulating algorithmic disinformation, let alone these three principles. The Article then examines legislative reforms in France and China, where the three principles have been translated into legal rules requiring social media companies to disclose their disinformation-related algorithms, render them intelligible to users, and assume legal responsibility for curbing the spread of disinformation on their platforms. Based on a critical discussion of the major problems with these legal rules, the Article puts forward a multi-stakeholder approach to better implement the three principles. It argues that the United States should take the lead in creating and piloting an algorithmic disinformation review system (ADRS). This new system would empower the administrative oversight of algorithmic disinformation and promote the dynamic engagement of social media users and experts in policing algorithms that generate and disseminate disinformation. The ADRS would thus promote the transparency and intelligibility of algorithms and hold social media platforms accountable for curbing disinformation.}
}