APN paper, misc Lua

This commit is contained in:
osmarks 2024-03-29 14:35:43 +00:00
parent 70af4fb244
commit 350698a352
11 changed files with 602 additions and 0 deletions

View File

@ -0,0 +1,15 @@
-- Broadcast stored energy and capacity for every attached energy storage
-- peripheral over all wireless modems, once per second, on channel 48869.
local modems = {peripheral.find("modem", function(_, o) return o.isWireless() end)}
local storage = {peripheral.find "energy_storage"}

-- Transmit a metric tuple (id, description, value, ...) via every wireless modem.
local function send(...)
    for _, m in pairs(modems) do
        m.transmit(48869, 48869, {...})
    end
end

while true do
    for _, s in pairs(storage) do
        -- Display name comes from settings; fall back to the raw peripheral
        -- name so an unconfigured device doesn't crash the concatenation
        -- below with a nil.
        local name = settings.get("storage_name_" .. peripheral.getName(s))
            or peripheral.getName(s)
        send("mc_stored_rf/" .. name, "energy stored in RF", s.getEnergy())
        send("mc_capacity_rf/" .. name, "maximum capacity in RF", s.getEnergyCapacity())
    end
    sleep(1)
end

View File

@ -0,0 +1,21 @@
-- Push fluids from the configured input tank peripheral to per-fluid target
-- tanks, reporting transferred millibuckets as an incrementing metric over
-- all wireless modems on channel 48869.
local wireless = {peripheral.find("modem", function(_, o) return o.isWireless() end)}

-- Broadcast a metric tuple through each wireless modem.
local function send(...)
    for _, m in pairs(wireless) do
        m.transmit(48869, 48869, {...})
    end
end

local input = peripheral.wrap(settings.get "input")
local subsystem = settings.get "subsystem"
local desc = settings.get "subsystem_description"

while true do
    for _, tank in pairs(input.tanks()) do
        local fluid = tank.name
        -- Both the destination tank and the metric name are per-fluid
        -- settings; fluids without both configured are skipped.
        local target = settings.get("output_" .. fluid)
        local name = settings.get("name_" .. fluid)
        if target and name then
            local transfer = input.pushFluid(target, nil, fluid)
            print(target, transfer, fluid)
            send("fluid_throughput_" .. subsystem .. "/" .. name, "millibuckets of fluid from " .. desc, transfer, "inc")
        end
    end
    sleep(1)
end

View File

@ -0,0 +1,26 @@
-- Move every item from the input inventory to the configured output inventory,
-- reporting per-item throughput as an incrementing metric over all wireless
-- modems on channel 48869.
local modems = {peripheral.find("modem", function(_, o) return o.isWireless() end)}

-- Transmit a metric tuple via every wireless modem.
local function send(...)
    for _, m in pairs(modems) do
        m.transmit(48869, 48869, {...})
    end
end

local input = peripheral.wrap(settings.get "input")
local output = settings.get "output"
local subsystem = settings.get "subsystem"
local desc = settings.get "subsystem_description"
-- Item ID -> display name. getItemDetail is a slow peripheral round-trip,
-- so results are cached for the life of the program.
local namecache = {}

while true do
    for slot, stack in pairs(input.list()) do
        local name = namecache[stack.name]
        if not name then
            -- The slot may have been emptied between list() and here (e.g.
            -- by another machine), in which case getItemDetail returns nil;
            -- fall back to the item ID instead of crashing on a nil index.
            local detail = input.getItemDetail(slot)
            name = detail and detail.displayName or stack.name
            namecache[stack.name] = name
        end
        local transfer = input.pushItems(output, slot)
        print(transfer, name)
        send("item_throughput_" .. subsystem .. "/" .. name, "items produced in " .. desc, transfer, "inc")
    end
    sleep(1)
end

View File

@ -0,0 +1,48 @@
-- Mekanism fission reactor controller: publishes reactor and turbine telemetry
-- on channel 48869 every second, throttles the burn rate against turbine
-- charge, and performs a latching SCRAM on low coolant, high waste, or high
-- temperature. (Removed an unused `name = os.getComputerLabel()` local.)
local modems = {peripheral.find("modem", function(_, o) return o.isWireless() end)}

-- Transmit a metric tuple via every wireless modem.
local function send(...)
    for _, m in pairs(modems) do
        m.transmit(48869, 48869, {...})
    end
end

-- Wait for the reactor logic adapter to attach and expose its API; peripheral
-- methods may not be immediately available after a chunk load.
local reactor
repeat
    reactor = peripheral.find "fissionReactorLogicAdapter"
    print "checking reactor..."
    sleep(0.5)
until reactor and reactor.getCoolant

local max_burn = settings.get "max_burn"
local turbine = peripheral.find "turbineValve"
local min_burn = 0.1
-- Once tripped, the latch is never cleared in software; restarting this
-- program is required to re-activate the reactor.
local scram_latch = false

while true do
    local coolant = reactor.getCoolant().amount
    send("mek_reactor_coolant_mb", "coolant in reactor tank", coolant)
    local coolant_capacity = reactor.getCoolantCapacity()
    send("mek_reactor_coolant_capacity_mb", "coolant in reactor max capacity", coolant_capacity)
    local waste = reactor.getWaste().amount
    local waste_capacity = reactor.getWasteCapacity()
    send("mek_reactor_waste_mb", "waste in reactor tank", waste)
    send("mek_reactor_waste_capacity_mb", "waste in reactor max capacity", waste_capacity)
    local fuel = reactor.getFuel().amount
    local fuel_capacity = reactor.getFuelCapacity()
    send("mek_reactor_fuel_mb", "fuel in reactor tank", fuel)
    send("mek_reactor_fuel_capacity_mb", "fuel in reactor max capacity", fuel_capacity)
    local temperature = reactor.getTemperature()
    send("mek_reactor_temperature_k", "temperature of reactor", temperature)
    send("mek_reactor_burn_rate_mb_t", "fuel burn rate of reactor", reactor.getActualBurnRate())
    -- Turbine status. turbine_percent is the stored-energy fill fraction.
    -- NOTE(review): assumes getEnergyNeeded() is nonzero — confirm for this
    -- turbine, otherwise the division yields inf/NaN.
    local turbine_percent = turbine.getEnergy() / turbine.getEnergyNeeded()
    send("mc_stored_rf/reactorturbine", "energy stored in RF", turbine.getEnergy())
    send("mc_capacity_rf/reactorturbine", "maximum capacity in RF", turbine.getEnergyNeeded())
    -- Emergency shutdown: coolant below half, waste above half, or >500 K.
    if coolant < coolant_capacity / 2 or waste > waste_capacity / 2 or temperature > 500 then
        print "SCRAM"
        reactor.scram()
        scram_latch = true
    end
    if not scram_latch then
        -- Burn rate scales linearly with turbine headroom (1 - fill),
        -- clamped to [min_burn, max_burn].
        reactor.setBurnRate(math.max(math.min((max_burn - min_burn) * (1 - turbine_percent) + min_burn, max_burn), min_burn))
        if not reactor.getStatus() then reactor.activate() end
    end
    sleep(1)
end

View File

@ -0,0 +1,16 @@
-- Pipe pressure monitor/controller: reports the measured pressure on channel
-- 48869 and drives a redstone output to hold it at the configured target.
local wireless = {peripheral.find("modem", function(_, o) return o.isWireless() end)}

-- Broadcast a metric tuple through each wireless modem.
local function send(...)
    for _, m in pairs(wireless) do
        m.transmit(48869, 48869, {...})
    end
end

local pipe = peripheral.wrap(settings.get "pipe")
local label = settings.get "label"
local ctrl = settings.get "control_output"
local target_pressure = settings.get "target"

while true do
    local pressure = pipe.getPressure()
    send("pressure/" .. label, "bars of pressure in measured pipe", pressure)
    -- Simple bang-bang control: output is on while pressure is under target.
    rs.setOutput(ctrl, pressure < target_pressure)
    sleep(1)
end

View File

@ -0,0 +1,11 @@
-- Radiation monitor: broadcasts the environment detector's raw radiation
-- reading every second over all wireless modems on channel 48869.
local wireless = {peripheral.find("modem", function(_, o) return o.isWireless() end)}

-- Transmit a metric tuple through each wireless modem.
local function broadcast(...)
    for _, m in pairs(wireless) do
        m.transmit(48869, 48869, {...})
    end
end

local detector = peripheral.find "environmentDetector"

while true do
    broadcast("mek_radiation_sv_h", "Mekanism radiation level (Sv/h)", detector.getRadiationRaw())
    sleep(1)
end

View File

@ -0,0 +1,84 @@
-- Botania hydroangeas manufacturing turtle. Crafts hydroangeas from the
-- required mystical flowers in the input chest, depositing results into the
-- output chest; runs a parallel loop that pulses a drum while flower
-- production is paused.
local input = peripheral.wrap "ironchests:copper_chest_0"
local drum_ri = peripheral.wrap "redstone_integrator_0"
local amaranthus_ri = peripheral.wrap "redstone_integrator_1"
local output_hydroangeas_chest = peripheral.wrap "minecraft:chest_2"
local disposal = peripheral.wrap "botania:open_crate_0"
local own_name = "turtle_0"
local aux_chest = peripheral.wrap "minecraft:chest_1"
-- Flower colours required for the recipe.
local reqs = {
"blue",
"cyan"
}
-- Deduplicate the input chest (keep one stack per item type; surplus stacks
-- go to the disposal crate) and check that at least 16 of each required
-- flower colour are present.
-- Returns: ok (boolean), and a map of item name -> slot index.
local function manage_input_chest()
local count = {}
local slots = {}
for slot, meta in pairs(input.list()) do
if count[meta.name] then
-- Already recorded a stack of this item; discard the duplicate stack.
input.pushItems(peripheral.getName(disposal), slot)
else
count[meta.name] = meta.count
slots[meta.name] = slot
end
end
local ok = true
for _, req in ipairs(reqs) do
local name = ("botania:%s_mystical_flower"):format(req)
if count[name] == nil or count[name] < 16 then
ok = false
end
end
return ok, slots
end
-- Main manufacturing loop: when materials are available and the output chest
-- is not full, run one crafting cycle, then idle.
local function main()
while true do
local can_produce_hydroangeas, slot_map = manage_input_chest()
local dest = output_hydroangeas_chest.list()[1]
local seed_src = aux_chest.list()[2]
amaranthus_ri.setOutput("east", can_produce_hydroangeas)
-- NOTE(review): Lua precedence parses this condition as
-- (can_produce_hydroangeas and not dest) or ((dest and dest.count < 16) and seed_src)
-- so a partially-filled output stack plus seeds triggers a cycle even when
-- can_produce_hydroangeas is false — confirm this is intended.
if can_produce_hydroangeas and not dest or (dest and dest.count < 16) and seed_src then
amaranthus_ri.setOutput("east", true)
print "manufacturing cycle."
-- Take the item from aux chest slot 1, place it above, and cycle items
-- through the inventory below (exact item roles not visible here — TODO
-- confirm against the physical setup).
aux_chest.pushItems(own_name, 1, 1)
turtle.placeUp()
turtle.dropDown()
turtle.suckDown()
aux_chest.pullItems(own_name, 1)
-- Craft once per required flower colour, dropping products below.
for _, req in ipairs(reqs) do
input.pushItems(own_name, slot_map[("botania:%s_mystical_flower"):format(req)], 1)
turtle.craft()
turtle.dropDown(2)
end
aux_chest.pushItems(own_name, 2, 1)
turtle.dropDown()
turtle.suckDown()
output_hydroangeas_chest.pullItems(own_name, 1)
sleep(3)
-- Re-scan after the cycle; these locals deliberately shadow the outer
-- ones for the post-cycle check.
local can_produce_hydroangeas, slot_map = manage_input_chest()
if slot_map["botania:hydroangeas"] then
print "moving from external"
output_hydroangeas_chest.pullItems(peripheral.getName(input), slot_map["botania:hydroangeas"])
end
sleep(30)
else
print "enough hydroangeas or insufficient seeds or flowers"
sleep(30)
end
end
end
-- Pulse the drum briefly every 10 seconds, but only while the amaranthus
-- output (flower production) is off.
local function run_drum()
while true do
if not amaranthus_ri.getOutput "east" then
print "pulsing drum"
drum_ri.setOutput("down", true)
sleep(0.1)
drum_ri.setOutput("down", false)
end
sleep(10)
end
end
parallel.waitForAll(run_drum, main)

View File

@ -0,0 +1,15 @@
-- Hydroangeas holder turtle: every cycle, takes one hydroangeas from the
-- output chest, deploys it in front of the turtle while asserting a redstone
-- signal, then retrieves it and returns it to the chest.
-- (Removed an unused `count = turtle.getItemCount()` local.)
local output_hydroangeas_chest = peripheral.wrap "minecraft:chest_2"
local own_name = "turtle_1"
local hh_ri = peripheral.wrap "redstone_integrator_2"

while true do
    -- Pull one item from chest slot 1 into the turtle.
    output_hydroangeas_chest.pushItems(own_name, 1, nil, 1)
    -- Assert the redstone output while the item is deployed.
    hh_ri.setOutput("west", true)
    turtle.drop()
    sleep(10)
    turtle.suck()
    hh_ri.setOutput("west", false)
    -- Return the item to chest slot 1 and idle before the next cycle.
    output_hydroangeas_chest.pullItems(own_name, 1, nil, 1)
    sleep(30)
end

View File

@ -0,0 +1,82 @@
\documentclass[11pt]{article}
% Thanks to GPT-4-0314 for LaTeX help!
% Packages
\usepackage{geometry} % to change the page dimensions
\geometry{letterpaper}
\usepackage{graphicx} % support for graphics
\usepackage{hyperref} % hyperlinks
\usepackage{amsmath} % advanced math
\usepackage{amsfonts}
\usepackage{cite} % bibliography
\usepackage{nopageno}
\usepackage [english]{babel}
\usepackage [autostyle, english = american]{csquotes}
\MakeOuterQuote{"}
\title{Advancing Consensus: Automated Persuasion Networks for Public Belief Enhancement}
\author{osmarks.net Computational Memetics Division \\ \texttt{\href{mailto:comp.meme@osmarks.net}{comp.meme@osmarks.net}}}
\date{-1 April 2024}
\begin{document}
\maketitle
\begin{abstract}
Incorrect lay beliefs, as produced by disinformation campaigns and otherwise, are an increasingly severe threat to human civilization, as exemplified by the many failings of the public during the COVID-19 pandemic.
We propose an end-to-end system, based on application of modern AI techniques at scale, designed to influence mass sentiment in a well-informed and beneficial direction.
\end{abstract}
\section{Introduction}
In today's increasingly complex and rapidly changing world, it is challenging for people to maintain accurate knowledge about more than a small part of the world\cite{Kilov2021}\cite{Crichton2002GellMann}, but it's socially unacceptable or undesirable, and in some cases impossible, to reserve judgment and not proffer an opinion on every topic. As a direct consequence, many have incorrect beliefs, acting on which leads to negative consequences both for themselves and society in general\cite{cicero_deoratore}. This is exacerbated by the increasing prevalence of misinformation, disinformation and malinformation\cite{MaC6453} harming the public's ability to reach truth and make informed, justified decisions. In this hostile environment, attempts to enhance education in critical thinking are insufficiently timely and far-reaching, and a more direct solution is needed.
In this paper, we propose the Automated Persuasion Network, a system for deploying modern large language models (LLMs) to efficiently influence public opinions in desirable directions via social media. We develop an architecture intended to allow selective, effective changes to belief systems by exploiting social conformity.
\section{Methodology}
\subsection{Overview}
Humans derive beliefs and opinions from their perception of the beliefs and opinions of their peer group\cite{Cialdini2004}\cite{Deutsch1955ASO}, as well as a broader perception of what is presently socially acceptable, required or forbidden. Our approach relies on a Sybil attack\cite{6547122} against this social processing, executed by deploying LLMs to emulate people of similar attitudes to targets within the context of online social media platforms. While \cite{bocian2024moral} suggests that social pressure from AIs known to be AIs can be effective, we believe that persuasion by apparent humans is more robust and generalizable, especially since even the perception of automated social interaction has been known to trigger backlash or fear from a wide range of groups\cite{doi:10.1080/0144929X.2023.2276801}\cite{Yan2023}. We automatically derive strategies to affect desired beliefs indirectly, via creating social proof for other related beliefs, using a Bayesian network approach.
Naive implementations of this method involve many manual processing steps --- for instance, identification of targets, construction of personas for LLMs to emulate, and gathering data for belief causal modelling. We replace these with automated solutions based on natural language processing --- unsupervised clustering of internet users using text embeddings, direct evaluation of currently held opinions within a group using LLMs, and surveying simulacra rather than specific extant humans (as described in \cite{Argyle_2023}) --- to allow operation at scale without direct human oversight. This permits much more finely individualized targeting than used in e.g. \cite{10.1093/pnasnexus/pgae035} without additional human labour.
\subsection{Segmentation}
In order to benefit from the effectiveness of persuasive strategies optimized for individuals while still having enough data for reasonable targeting, we apply standard unsupervised clustering techniques. We acquire profile information and a social graph (of friendships and interactions) for all relevant social media accounts, generate text embeddings from each user's profile information, as well as a representative sample of their publicly accessible posts, and combine this with graph embeddings to generate a unified representation. We then apply the OPTICS clustering algorithm\cite{DBLP:conf/sigmod/AnkerstBKS99} to generate a list of clusters.
From these, several pieces of information need to be extracted. We identify the accounts closest to the cluster's centroid and take them as exemplars, and additionally compute the distribution of posting frequency and timings. We use these in later stages to ensure that our personas cannot be distinguished via timing side-channels. Additionally, we generate a set of personas using a variant of QDAIF\cite{bradley2023qualitydiversity}, with a standard instruction-tuned LLM (IT-LLM) used to mutate samples, using the cluster exemplars as the initial seed. As a quality metric, we ask the IT-LLM to evaluate the realism of a persona and its alignment with the exemplars, and we organize our search space into bins using k-means clustering on the generated user sentence embeddings to ensure coverage of all persona types within a cluster.
\subsection{Analysis}
We use a variant of \cite{powell2018}'s methodology to tune persuasion strategies to audiences to effectively affect target beliefs. We replace their manual identification and belief measurement step by using the IT-LLM to first generate a set of beliefs that relate to and/or could plausibly cause the target belief, as well as scales for measuring adherence to these possible beliefs. For measurement, rather than using the IT-LLM as before, we apply a prompt-engineered non-instruction-tuned model (also known as a foundation model, base model or pretrained language model (PT-LLM)). This is because instruction-tuned LLMs are frequently vulnerable to the phenomenon of mode collapse\cite{mysteriesofmodecollapse}\cite{hamilton2024detecting}, in which models fail to generalize over latent variables such as authorship of text. This is incompatible with our need to faithfully simulate a wide range of internet users. Instruction-tuned LLMs are also unsuitable for direct internet-facing deployment, due to the risk of prompt injection\cite{perez2022ignore}. Within each cluster, we use the acquired representative text from each exemplar from the segmentation stage to condition the LLM generations, and then ask several instances the generated questions in a random order. Multiple separate sampling runs are necessary due to the "simulator" nature of LLMs\cite{Shanahan2023}: our persona may not fully constrain its model to a single person with consistent beliefs. Runs producing responses that cannot be parsed into valid responses are discarded.
Given this synthetic data on belief prevalence, we apply a structure learning algorithm to infer causality --- which beliefs cause other beliefs. Unlike \cite{powell2018}, we do not incorporate any prior structure from theory --- due to the additional complexity of applying theories in our automated pipeline, and since our requirements lean more toward predictive accuracy than human interpretability --- and instead apply their BDHC algorithm to generate many candidate graphs, selecting a final model based on a weighted combination of model complexity (number of edges) and likelihood, to combat overfitting.
We then select the beliefs with the greatest estimated contribution to our target belief and direct the IT-LLM to modify our generated personas with the necessary belief adjustment. Due to the aforementioned mode collapse issues, we apply rejection sampling, discarding any generated personas that diverge too far from their original forms (as measured by semantic embedding distance) and regenerating. The resulting personas are used in the next stage.
\subsection{Interaction}
After the completion of the previous stages, the Automated Persuasion Network must interact with humans to cause belief updates. This step requires large-scale inference: however, as most human communication is simple and easy to model, at least over short contexts, we are able to use standard low-cost consumer GPUs running open-weight PT-LLMs, using the vLLM\cite{kwon2023efficient} inference server. As an additional cost-saving measure, we use a multi-tiered system whereby generations are initially run on a small model and, if too complex for it (as measured by perplexity), rerun using a more capable language model.
We use the belief-modified personas generated in the Analysis stage, and attempt to have each of them mimic the actions of a human user in their cluster as much as possible. We identified a number of challenges. Most notably, nonhuman users are frequently detected using posting frequency\cite{howard2016bots} and timings \cite{Duh2018CollectiveBehavior}\cite{PAN2016193}. By using a fairly large set of accounts rather than a single bot, we can avoid detection based on simply noticing anomalously high posting frequencies, and by scheduling generation of new posts and conditionally replying to other users' posts in accordance with cluster statistics for such gathered during the Segmentation stage we can prevent easy timing-based detection. We have not yet identified a complete strategy for avoiding social-graph-based detection such as \cite{6547122}: our present best mitigation is to deploy new personas slowly and to maintain the rate of interaction between them at the base rate within the cluster.
Other difficulties involve technical countermeasures in use against nonhuman users, such as CAPTCHAs and limited APIs. However, while today's most sophisticated CAPTCHAs exceed current AI capabilities, commercial services are available to dispatch solving to humans at very low cost. We are able to mitigate other limitations with the use of commercial residential proxy services and browser automation software for scraping.
\subsection{Monitoring}
In order to determine the efficacy of our approach, we periodically sample posts from human users within each cluster and apply the IT-LLM to rate how much each post entails our target beliefs, allowing measurement of belief change over time.
\section{Results}
No results are available for release at this time.
\section{Discussion}
We believe our architecture represents a major advance in misinformation prevention and public attitude alignment. A promising future direction for research we have identified is introduction of technical enhancements such as implementation of speculative decoding in post generation, as well as use of vision/language models such as \cite{liu2023improved} to allow interaction with multimodal content. We also suggest integration of concepts from LLM agents to reduce distinguishability from humans --- for instance, personas could be given the ability to create new posts based on newly released news articles or information from other social media sites. Finally, while we have primarily focused on human emulation with some limited optimization of persuasive strategies, future AI technology is likely to be capable of more powerful direct persuasion.
% References Section
\bibliographystyle{apalike}
\bibliography{references} % references.bib contains your bibliography
\end{document}

View File

@ -0,0 +1,284 @@
@inproceedings{powell2018,
title={Articulating lay theories through graphical models: A study of beliefs surrounding vaccination decisions},
author={Powell, Derek and Weisman, Kara and Markman, Ellen M},
year={2018},
booktitle={Proceedings of the 40th Annual Conference of the Cognitive Science Society}
}
@article{Argyle_2023,
title={Out of One, Many: Using Language Models to Simulate Human Samples},
volume={31},
ISSN={1476-4989},
url={http://dx.doi.org/10.1017/pan.2023.2},
DOI={10.1017/pan.2023.2},
number={3},
journal={Political Analysis},
publisher={Cambridge University Press (CUP)},
author={Argyle, Lisa P. and Busby, Ethan C. and Fulda, Nancy and Gubler, Joshua R. and Rytting, Christopher and Wingate, David},
year={2023},
month=feb, pages={337--351}
}
@misc{bradley2023qualitydiversity,
title={Quality-Diversity through AI Feedback},
author={Herbie Bradley and Andrew Dai and Hannah Teufel and Jenny Zhang and Koen Oostermeijer and Marco Bellagente and Jeff Clune and Kenneth Stanley and Grégory Schott and Joel Lehman},
year={2023},
eprint={2310.13032},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
@article{10.1093/pnasnexus/pgae035,
author = {Simchon, Almog and Edwards, Matthew and Lewandowsky, Stephan},
title = "{The persuasive effects of political microtargeting in the age of generative artificial intelligence}",
journal = {PNAS Nexus},
volume = {3},
number = {2},
pages = {pgae035},
year = {2024},
month = {01},
abstract = "{The increasing availability of microtargeted advertising and the accessibility of generative artificial intelligence (AI) tools, such as ChatGPT, have raised concerns about the potential misuse of large language models in scaling microtargeting efforts for political purposes. Recent technological advancements, involving generative AI and personality inference from consumed text, can potentially create a highly scalable “manipulation machine” that targets individuals based on their unique vulnerabilities without requiring human input. This paper presents four studies examining the effectiveness of this putative “manipulation machine.” The results demonstrate that personalized political ads tailored to individuals’ personalities are more effective than nonpersonalized ads (studies 1a and 1b). Additionally, we showcase the feasibility of automatically generating and validating these personalized ads on a large scale (studies 2a and 2b). These findings highlight the potential risks of utilizing AI and microtargeting to craft political messages that resonate with individuals based on their personality traits. This should be an area of concern to ethicists and policy makers.}",
issn = {2752-6542},
doi = {10.1093/pnasnexus/pgae035},
url = {https://doi.org/10.1093/pnasnexus/pgae035},
eprint = {https://academic.oup.com/pnasnexus/article-pdf/3/2/pgae035/56683743/pgae035.pdf},
}
@misc{liu2024prompt,
title={Prompt Injection attack against LLM-integrated Applications},
author={Yi Liu and Gelei Deng and Yuekang Li and Kailong Wang and Zihao Wang and Xiaofeng Wang and Tianwei Zhang and Yepang Liu and Haoyu Wang and Yan Zheng and Yang Liu},
year={2024},
eprint={2306.05499},
archivePrefix={arXiv},
primaryClass={cs.CR}
}
@misc{perez2022ignore,
title={Ignore Previous Prompt: Attack Techniques For Language Models},
author={Fábio Perez and Ian Ribeiro},
year={2022},
eprint={2211.09527},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
@misc{hamilton2024detecting,
title={Detecting Mode Collapse in Language Models via Narration},
author={Sil Hamilton},
year={2024},
eprint={2402.04477},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
@online{mysteriesofmodecollapse,
author = {janus},
title = {Mysteries of mode collapse},
year = {2022},
url = {https://www.lesswrong.com/posts/t9svvNPNmFf5Qa3TA/mysteries-of-mode-collapse},
note = {Accessed: 2022-11-08}
}
@article{bocian2024moral,
title={Moral conformity in a digital world: Human and nonhuman agents as a source of social pressure for judgments of moral character},
author={Bocian, Konrad and Gonidis, Lazaros and Everett, Jim A C},
journal={PloS one},
volume={19},
number={2},
pages={e0298293},
year={2024},
publisher={Public Library of Science},
doi={10.1371/journal.pone.0298293},
pmid={38358977},
}
@inproceedings{DBLP:conf/sigmod/AnkerstBKS99,
author = {Mihael Ankerst and Markus M. Breunig and Hans-Peter Kriegel and J{\"o}rg Sander},
editor = {Alex Delis and Christos Faloutsos and Shahram Ghandeharizadeh},
title = {OPTICS: Ordering Points To Identify the Clustering Structure},
booktitle = {SIGMOD 1999, Proceedings ACM SIGMOD International Conference on Management of Data, June 1-3, 1999, Philadelphia, Pennsylvania, USA},
publisher = {ACM Press},
year = {1999},
isbn = {1-58113-084-8},
pages = {49-60},
ee = {http://doi.acm.org/10.1145/304182.304187, db/conf/sigmod/AnkerstBKS99.html},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
@misc{howard2016bots,
title={Bots, \#StrongerIn, and \#Brexit: Computational Propaganda during the UK-EU Referendum},
author={Philip N. Howard and Bence Kollanyi},
year={2016},
eprint={1606.06356},
archivePrefix={arXiv},
primaryClass={cs.SI}
}
@article{doi:10.1080/0144929X.2023.2276801,
author = {Wei Fang and Chen Nie},
title = {Social media use, social bot literacy, perceived threats from bots, and perceived bot control: a moderated-mediation model},
journal = {Behaviour \& Information Technology},
volume = {0},
number = {0},
pages = {1-17},
year = {2023},
publisher = {Taylor \& Francis},
doi = {10.1080/0144929X.2023.2276801},
URL = {https://doi.org/10.1080/0144929X.2023.2276801},
eprint = {https://doi.org/10.1080/0144929X.2023.2276801}
}
@article{Yan2023,
author = {Harry Yaojun Yan and Kai-Cheng Yang and James Shanahan and Filippo Menczer},
title = {Exposure to social bots amplifies perceptual biases and regulation propensity},
journal = {Scientific Reports},
volume = {13},
number = {1},
pages = {20707},
year = {2023},
date = {2023/11/24},
doi = {10.1038/s41598-023-46630-x},
url = {https://doi.org/10.1038/s41598-023-46630-x},
issn = {2045-2322}
}
@article{Shanahan2023,
author = {Shanahan, Murray and McDonell, Kyle and Reynolds, Laria},
title = {Role play with large language models},
journal = {Nature},
volume = {623},
number = {7987},
pages = {493-498},
year = {2023},
doi = {10.1038/s41586-023-06647-8},
url = {https://doi.org/10.1038/s41586-023-06647-8},
date = {2023/11/01},
issn = {1476-4687},
}
@article{Duh2018CollectiveBehavior,
author = {Duh, Andrej and Rupnik, Marjan Slak and Korošak, Dean},
title = {Collective Behavior of Social Bots Is Encoded in Their Temporal Twitter Activity},
journal = {Big Data},
volume = {6},
number = {2},
pages = {113--123},
year = {2018},
doi = {10.1089/big.2017.0041},
url = {http://doi.org/10.1089/big.2017.0041},
month = jun
}
@inproceedings{kwon2023efficient,
title={Efficient Memory Management for Large Language Model Serving with PagedAttention},
author={Woosuk Kwon and Zhuohan Li and Siyuan Zhuang and Ying Sheng and Lianmin Zheng and Cody Hao Yu and Joseph E. Gonzalez and Hao Zhang and Ion Stoica},
booktitle={Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles},
year={2023}
}
@INPROCEEDINGS{6547122,
author={Alvisi, Lorenzo and Clement, Allen and Epasto, Alessandro and Lattanzi, Silvio and Panconesi, Alessandro},
booktitle={2013 IEEE Symposium on Security and Privacy},
title={SoK: The Evolution of Sybil Defense via Social Networks},
year={2013},
volume={},
number={},
pages={382-396},
keywords={Protocols;Communities;Image edge detection;Facebook;Robustness;Detection algorithms},
doi={10.1109/SP.2013.33}
}
@article{PAN2016193,
title = {Discriminating bot accounts based solely on temporal features of microblog behavior},
journal = {Physica A: Statistical Mechanics and its Applications},
volume = {450},
pages = {193-204},
year = {2016},
issn = {0378-4371},
doi = {https://doi.org/10.1016/j.physa.2015.12.148},
url = {https://www.sciencedirect.com/science/article/pii/S0378437116000388},
author = {Junshan Pan and Ying Liu and Xiang Liu and Hanping Hu},
keywords = {Human dynamics, Microblog, Behavior patterns, Bots, Burstiness, Entropy},
}
@misc{liu2023improved,
title={Improved Baselines with Visual Instruction Tuning},
author={Haotian Liu and Chunyuan Li and Yuheng Li and Yong Jae Lee},
year={2023},
eprint={2310.03744},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
@article{Cialdini2004,
title={Social influence: compliance and conformity},
author={Cialdini, Robert B and Goldstein, Noah J},
journal={Annual Review of Psychology},
volume={55},
pages={591--621},
year={2004},
doi={10.1146/annurev.psych.55.090902.142015},
PMID={14744228},
PMCID={N/A},
ISSN={0066-4308},
language={English},
publisher={NLM},
status={MEDLINE},
date_completed={20040414},
last_revision_date={20220311},
type={Journal Article, Research Support, U.S. Gov't, Non-P.H.S., Review},
place_of_publication={United States}
}
@article{Deutsch1955ASO,
title={A study of normative and informational social influences upon individual judgement.},
author={Morton Deutsch and Harold Benjamin Gerard},
journal={Journal of Abnormal Psychology},
year={1955},
volume={51 3},
pages={629-36},
url={https://pubmed.ncbi.nlm.nih.gov/13286010/}
}
@misc{Crichton2002GellMann,
author = {Crichton, Michael},
title = {Why Speculate?},
year = {2002},
note = {Speech presented at the International Leadership Forum, La Jolla, California, April 26},
howpublished = {\url{https://web.archive.org/web/20070714204136/http://www.michaelcrichton.net/speech-whyspeculate.html}},
}
@article{Kilov2021,
author = {Kilov, Daniel},
title = {The brittleness of expertise and why it matters},
journal = {Synthese},
volume = {199},
number = {1},
pages = {3431-3455},
year = {2021},
doi = {10.1007/s11229-020-02940-5},
date = {2021/12/01},
}
@article{MaC6453,
author = {Marta Pérez-Escolar and Darren Lilleker and Alejandro Tapia-Frade},
title = {A Systematic Literature Review of the Phenomenon of Disinformation and Misinformation},
journal = {Media and Communication},
volume = {11},
number = {2},
year = {2023},
keywords = {credibility; disinformation; fake news; falsehood; hoaxes; misinformation; truth},
issn = {2183-2439},
pages = {76--87},
doi = {10.17645/mac.v11i2.6453},
url = {https://www.cogitatiopress.com/mediaandcommunication/article/view/6453}
}
@book{cicero_deoratore,
author = {Cicero, Marcus Tullius},
title = {On the Ideal Orator},
year = {2001},
publisher = {Oxford University Press},
isbn = {0-19-509197-3},
translator = {May, James M. and Wisse, Jakob}
}