\pdfoutput=1
\documentclass[11pt]{article}
\usepackage{times}
\usepackage{latexsym}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{microtype}
\usepackage{inconsolata}
\usepackage{bussproofs}
\usepackage{amsmath}
\usepackage{amssymb, mathrsfs}
\usepackage{tikz}
\usepackage{pgfplots}
\usepackage{subcaption}
\usepackage{tikz-dependency}
\pgfplotsset{compat=1.17}
\usetikzlibrary{positioning}
\newcommand{\singleprop}{s_{p}}
\newcommand{\singlepred}{s_{q}}
\newcommand{\grouppred}{g_{q}}
\newcommand{\groupprop}{g_{p}}
\newcommand{\inference}{\ell_{gsr}}
\newcommand{\singlepropi}[1]{s_{p,#1}}
\newcommand{\implicationpred}{(g_p, s_p, (r_g, r_p))}
\newcommand{\backlinks}{\textsc{backward}_\Phi}
\newcommand{\forwardlinks}{\textsc{forward}_\Phi}
\newcommand{\propgraph}{\Phi}
\newcommand{\propgraphs}{\Phi(\singleprop)}
\newcommand{\fnname}{\mathscr{F}}
\newcommand{\argset}{\mathcal{A}}
\newcommand{\argmap}{\left\{(r, a)\right\}}
\newcommand{\andsign}{\textbf{\em and}}
\newcommand{\orsign}{\textsc{Or}}
\newcommand{\constant}[1]{{\bf c}_{#1}}
\newcommand{\variable}[1]{{\bf x}_{#1}}
\newcommand{\type}[1]{\tau_{#1}}
\newcommand{\xvariable}{{\bf x}}
\newcommand{\zvariable}{{\bf z}}
\newcommand{\pconstant}{{\bf p}}
\newcommand{\pvariable}{{\bf p}}
\newcommand{\qvariable}{{\bf q}}
\newcommand{\gvariable}{{\bf g}}
\newcommand{\wvariable}{{\bf w}}
\newcommand{\condsep}{\ |\ }
\newcommand{\varmask}{\textsc{mask}}
\newcommand{\roleset}{\left\{r_s\right\}}
\newcommand{\rolemap}{\left\{r_a, r_c\right\}}
\newcommand{\xjack}{\xvariable_{jack}}
\newcommand{\xjill}{\xvariable_{jill}}
\newcommand{\opand}{\textbf{\em and}}
\newcommand{\opor}{\textbf{\em or}}
\newcommand{\opxor}{\textbf{\em xor}}
\newcommand{\psiall}{\Psi_\forall}
\newcommand{\psiand}{\Psi_\opand}
\newcommand{\psior}{\Psi_\opor}
\newcommand{\subj}{\textsc{subj}}
\newcommand{\dobj}{\textsc{dobj}}
\newcommand{\iobj}{\textsc{iobj}}
\title{\bf The Quantified Boolean Bayesian Network \\ 
 \textmd{Theory and Experiments}
 \thanks{The author acknowledges the use of {\em ChatGPT} in the preparation of this work, for research, review and the production of many equations.}
 }
\author{
    {\Large Greg Coppola} \\
    {\em coppola.ai} \\
    Research. Develop. Meme.
}
\date{\today}
\begin{document}
\maketitle
\section{Contributions}
We introduce the {\bf Quantified Boolean Bayesian Network} ({\em QBBN} for short), a model from the {\em Bayesian Network} family, constructed and analyzed to provide a {\em unified view} of {\em logical} and {\em statistical} reasoning.
In particular, our work makes the following contributions:
\begin{itemize}
    \item {\bf Unified Model of Logical and Probabilistic Reasoning} \\ 
        We provide a single data structure, the {\em QBBN}, which can both:
        \begin{itemize}
            \item {\em reason probabilistically}, and answer {\em probabilistic queries}
            \item support arbitrarily complex {\em logical reasoning}, by fitting into a larger {\em consistent} and {\em complete} {\em logical deduction system}
        \end{itemize}
        We achieve this by integrating the {\em Bayesian Network}, a {\em graphical statistical model}, into the {\em natural deduction calculus} \cite{PrawitzNaturalDeduction} from {\em formal logic} to provide a {\em complete} and {\em consistent} calculus, of which the {\em QBBN} forms a precisely defined part (an example deduction rule is rendered after this list).
    \item {\bf A Generative Model Without Hallucinations} \\
        The {\em QBBN} shows how to create a {\em generative} model of the ({\em latent logical forms} underlying) unlabeled text.
        Like the {\em large language model} \cite{Bahdanau2014NeuralMT, vaswani2017attention, radford2018improving}, the {\em QBBN} is generative, and so can be used to {\em compress} the data \cite{SutskeverObservation}.
        But the {\em QBBN} does {\em not} {\em hallucinate}.
        It reasons consistently (i.e., ensuring that $P(x) + P(\neg x) = 1$ for all questions $x$), and can {\em explain} its reasoning in terms of {\em causality}, as any Bayesian Network can.
    \item {\bf Very Efficient Bayesian Inference} \\
        In general, inference in a Bayesian Network is intractable, i.e., requiring $\Omega(2^N)$ time for $N$ random variables.
        Our division of Bayesian Network nodes into \opand\ and \opor\ {\em boolean gates}, along with our use of approximate {\em iterative belief propagation}, means that {\em inference} can be not only tractable but {\em very efficient}: one full pass of approximate belief propagation requires $O(Nn)$ time, where $N$ is the number of variables and $n$ is the maximum number of incoming connections to an \opor\ gate; a minimal sketch of such a pass is given after this list. (However, the convergence of {\em iterative belief propagation} must be studied further.)
    \item {\bf Fast Versus Slow Thinking} \\
        We give, to our knowledge, the first mathematical {\em explanation} of the distinction between what has come to be known as {\em fast} versus {\em slow} thinking \cite{Kahneman2011ThinkingFast}.
        This explanation is based on the {\em proof theory} of the {\em natural deduction calculus}, and accords with both our graphical formulation and human experience.
        As a special case of general reasoning, we analyze {\em planning}.
    \item {\bf Calculus Over Dependency Trees} \\
        Empirically, {\em labeled dependency trees} are the easiest {\em syntactic formalism} to parse to.
        Traditionally, parsing language into a {\em complete} and {\em consistent} calculus has required translation into the {\em first-order logic} calculus \cite{Steedman1996}, but this translation adds complexity to implementation, inference and learning, e.g. by requiring additional {\em syntactic structure}.
        We show how complete and consistent deduction can be done directly over labeled dependency trees, avoiding the translation to {\em first-order logic} and so vastly simplifying both the learning and inference pipelines; a sketch of this representation is given after this list.
\end{itemize}
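As a point of orientation for the {\bf Unified Model of Logical and Probabilistic Reasoning} item above, the following is the standard conjunction-introduction rule of the natural deduction calculus \cite{PrawitzNaturalDeduction}, which can informally be read as the deductive counterpart of an \opand\ gate; the rendering is illustrative only, and is not a definition made by this paper.
\begin{prooftree}
\AxiomC{$A$}
\AxiomC{$B$}
\RightLabel{$\wedge$I}
\BinaryInfC{$A \wedge B$}
\end{prooftree}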
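To make concrete the complexity claim in the {\bf Very Efficient Bayesian Inference} item, the following Python sketch performs a single forward pass over a directed acyclic graph of boolean \opand\ and \opor\ gates. The node interface, the noisy-or weighting of \opor\ gates, and the example network are assumptions made for this sketch, not the exact parameterization of the {\em QBBN}; the point is only that one pass touches each of the $N$ nodes once and each of its at most $n$ incoming edges once, for $O(Nn)$ work, and that $P(x) + P(\neg x) = 1$ holds by construction.
\begin{verbatim}
# Illustrative sketch: one forward pass over boolean AND/OR gates.
from dataclasses import dataclass, field
from typing import Dict, List

@dataclass
class Node:
    name: str
    kind: str                       # "leaf", "and", or "or"
    parents: List["Node"] = field(default_factory=list)
    weights: List[float] = field(default_factory=list)  # "or" only
    prior: float = 0.5              # "leaf" only

def marginal(node: Node, cache: Dict[str, float]) -> float:
    """P(node = True) from one memoized forward pass."""
    if node.name in cache:
        return cache[node.name]
    if node.kind == "leaf":
        p = node.prior
    elif node.kind == "and":
        # Conjunction of parents, assumed independent.
        p = 1.0
        for parent in node.parents:
            p *= marginal(parent, cache)
    elif node.kind == "or":
        # Noisy-or: each true parent fires the gate with weight w.
        q = 1.0
        for parent, w in zip(node.parents, node.weights):
            q *= 1.0 - w * marginal(parent, cache)
        p = 1.0 - q
    else:
        raise ValueError(node.kind)
    cache[node.name] = p
    return p

# Hypothetical example network.
rain = Node("rain", "leaf", prior=0.3)
sprinkler = Node("sprinkler", "leaf", prior=0.4)
wet = Node("wet", "or", parents=[rain, sprinkler],
           weights=[0.9, 0.8])
cold = Node("cold", "leaf", prior=0.5)
icy = Node("icy", "and", parents=[wet, cold])

p_true = marginal(icy, {})
p_false = 1.0 - p_true
assert abs(p_true + p_false - 1.0) < 1e-12
print(f"P(icy) = {p_true:.3f}, P(not icy) = {p_false:.3f}")
\end{verbatim}
Iterative belief propagation would repeat such passes, also sending messages against the edge direction; the sketch shows only the cost of a single pass.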
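Finally, for the {\bf Calculus Over Dependency Trees} item, the following sketch shows the kind of data structure such deduction can operate over: a labeled dependency tree stored directly as a predicate with role-labeled arguments, plus a toy implication stated over those roles, with no detour through a first-order logic formula. The class names, role labels, and the example rule are hypothetical, and only illustrate the representation.
\begin{verbatim}
# Illustrative sketch: a dependency tree as a role-labeled proposition.
from dataclasses import dataclass
from typing import FrozenSet, Optional, Tuple

@dataclass(frozen=True)
class Proposition:
    predicate: str                    # e.g. the root verb lemma
    args: FrozenSet[Tuple[str, str]]  # (role label, argument)

    def role(self, label: str) -> Optional[str]:
        return dict(self.args).get(label)

def prop(predicate: str, **roles: str) -> Proposition:
    return Proposition(predicate, frozenset(roles.items()))

# "Jack sends Jill a letter", kept as a labeled dependency structure.
premise = prop("send", subj="jack", iobj="jill", dobj="letter")

# A toy implication stated directly over role labels:
# if x sends y something, then y receives it.
def receive_rule(p: Proposition) -> Optional[Proposition]:
    if p.predicate == "send":
        return prop("receive", subj=p.role("iobj"),
                    dobj=p.role("dobj"))
    return None

print(receive_rule(premise))
\end{verbatim}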
\begin{thebibliography}{}
\bibitem[Bahdanau et~al., 2014]{Bahdanau2014NeuralMT}
Bahdanau, D., Cho, K., and Bengio, Y. (2014).
\newblock Neural machine translation by jointly learning to align and
  translate.
\newblock {\em CoRR}, abs/1409.0473.
\bibitem[Kahneman, 2011]{Kahneman2011ThinkingFast}
Kahneman, D. (2011).
\newblock {\em Thinking, Fast and Slow}.
\newblock Farrar, Straus and Giroux, New York.
\bibitem[Prawitz, 1965]{PrawitzNaturalDeduction}
Prawitz, D. (1965).
\newblock {\em Natural Deduction: A Proof-Theoretical Study}.
\newblock Stockholm Studies in Philosophy 3. Almqvist \& Wiksell, Stockholm;
  Göteborg; Uppsala.
\newblock Acta Universitatis Stockholmiensis.
\bibitem[Radford et~al., 2018]{radford2018improving}
Radford, A., Narasimhan, K., Salimans, T., and Sutskever, I. (2018).
\newblock Improving language understanding by generative pre-training.
\bibitem[Steedman, 1996]{Steedman1996}
Steedman, M. (1996).
\newblock {\em Surface Structure and Interpretation}.
\newblock The MIT Press.
\bibitem[Sutskever, 2023]{SutskeverObservation}
Sutskever, I. (2023).
\newblock An observation on generalization.
\newblock YouTube video.
\newblock Accessed: 2024-01-29.
\bibitem[Vaswani et~al., 2017]{vaswani2017attention}
Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.~N.,
  Kaiser, L., and Polosukhin, I. (2017).
\newblock Attention is all you need.
\newblock In {\em Advances in Neural Information Processing Systems},
  volume~30.
\end{thebibliography}
\end{document}