Date: Sat, 30 Dec 2017 19:56:07 -0500
Subject: [PATCH] src/axiomwebsite/index.html Acknowledge CMU support
Goal: Acknowledge CMU support

changelog  2 +
patch  603 +
src/axiomwebsite/index.html  15 +
src/axiomwebsite/patches.html  2 +
 4 files changed, 20 insertions(+), 602 deletions(-)
diff --git a/changelog b/changelog
index c85c9df..c643b2c 100644
--- a/changelog
+++ b/changelog
@@ -1,3 +1,5 @@
+20171230 tpd src/axiomwebsite/patches.html 20171230.02.tpd.patch
+20171230 tpd src/axiomwebsite/index.html Acknowledge CMU support
20171230 tpd src/axiomwebsite/patches.html 20171230.01.tpd.patch
20171230 tpd books/bookvol13 next draft
20171224 tpd src/axiomwebsite/patches.html 20171224.01.tpd.patch
diff --git a/patch b/patch
index 336fea0..2fe3f23 100644
--- a/patch
+++ b/patch
@@ -1,603 +1,4 @@
books/bookvol13 next draft
+src/axiomwebsite/index.html Acknowledge CMU support
Goal: Proving Axiom Correct
+Goal: Acknowledge CMU support
\index{Blakley, Bob}
\begin{chunk}{axiom.bib}
@inproceedings{Blak96,
 author    = {Blakley, Bob},
 title     = {{The Emperor's Old Armor}},
 booktitle = {Proc. 1996 New Security Paradigms Workshop},
 publisher = {ACM},
 year      = {1996},
 paper     = {Blak96.pdf}
}

\end{chunk}

\index{Brooks, Frederick P.}
\begin{chunk}{axiom.bib}
@article{Broo87,
 author = "Brooks, Frederick P.",
 title = {{No Silver Bullet: Essence and Accidents of Software Engineering}},
 journal = "IEEE Computer",
 volume = "20",
 number = "4",
 pages = "10--19",
 year = "1987",
 abstract =
 "Fashioning complex conceptual constructs is the essence; accidental
 tasks arise in representing the constructs in language. Past progress
 has so reduced the accidental tasks that future progress now depends
 upon addressing the essence.",
 paper = "Broo87.pdf"
}

\end{chunk}

\index{Glass, Robert L.}
\begin{chunk}{axiom.bib}
@article{Glas94,
 author = "Glass, Robert L.",
 title = {{The Software-Research Crisis}},
 journal = "IEEE Software",
 volume = "11",
 number = "6",
 pages = "42--47",
 year = "1994",
 abstract =
 "With the advantage of more than 25 years' hindsight, this twenty-first
 century author looks askance at the 'crisis' in software practice and
 expresses deep concern for a crisis in software research.",
 paper = "Glas94.pdf"
}

\end{chunk}

\index{Fenton, Norman}
\begin{chunk}{axiom.bib}
@article{Fent93,
 author = "Fenton, Norman",
 title = {{How Effective Are Software Engineering Methods}},
 journal = "J. Systems Software",
 volume = "22",
 pages = "141--148",
 year = "1993",
 abstract =
 "For 25 years, software engineers have sought methods which they hope
 can provide a technological fix for the ``software crisis.'' Proponents
 of specific methods claim that their use leads to significantly
 improved quality and productivity. Such claims are rarely, if ever,
 backed up by hard evidence. We show that often, where real empirical
 evidence does exist, the results are counter to the views of the
 so-called experts. We examine the impact on the software industry of
 continuing to place our trust in unproven, and often revolutionary,
 methods. The very poor state of the art of empirical assessment in
 software engineering can be explained in part by inappropriate or
 inadequate use of measurement. Numerous empirical studies are flawed
 because of their poor experimental design and lack of adherence to
 proper measurement principles.",
 paper = "Fent93.pdf"
}

\end{chunk}

\index{Lindsay, Peter A.}
\begin{chunk}{axiom.bib}
@article{Lind88,
 author = "Lindsay, Peter A.",
 title = {{A Survey of Mechanical Support for Formal Reasoning}},
 journal = "Software Engineering Journal",
 volume = "3",
 number = "1",
 year = "1988",
 pages = "3--27",
 paper = "Lind88.pdf"
}

\end{chunk}

\index{Martin-L\"of, P.}
\begin{chunk}{axiom.bib}
@inproceedings{Mart79,
 author = "Martin-L{\"o}f, P.",
 title = {{Constructive Mathematics and Computer Programming}},
 booktitle = "Proc. 6th Int. Congress for Logic, Methodology and
 Philosophy of Science",
 publisher = "North-Holland",
 year = "1979",
 pages = "153--179",
 abstract =
 "If programming is understood not as the writing of instructions for
 this or that computing machine but as the design of methods of
 computation that it is the computer's duty to execute (a difference
 that Dijkstra has referred to as the difference between computer
 science and computing science), then it no longer seems possible to
 distinguish the discipline of programming from constructive
 mathematics. This explains why the intuitionistic theory of types,
 which was originally developed as a symbolism for the precise
 codification of constructive mathematics, may equally well be viewed
 as a programming language. As such it provides a precise notation not
 only, like other programming languages, for the programs themselves
 but also for the tasks that the programs are supposed to
 perform. Moreover, the inference rules of the theory of types, which
 are again completely formal, appear as rules of correct program
 synthesis. Thus the correctness of a program written in the theory of
 types is proved formally at the same time as it is being
 synthesized.",
 paper = "Mart79.pdf"
}

\end{chunk}

\index{Martin-L\"of, P.}
\begin{chunk}{axiom.bib}
@inproceedings{Mart73,
 author = "Martin-L{\"o}f, P.",
 title = {{An Intuitionistic Theory of Types}},
 booktitle = "Logic Colloquium '73",
 editor = "Rose, H.E. and Shepherdson, J.G.",
 publisher = "North-Holland",
 pages = "73--118",
 year = "1973",
 abstract =
 "The theory of types with which we shall be concerned is intended to
 be a full scale system for formalizing intuitionistic mathematics as
 developed, for example, in the book by Bishop 1967. The language of
 the theory is richer than the language of first order predicate
 logic. This makes it possible to strengthen the axioms for existence
 and disjunction. In the case of existence, the possibility of
 strengthening the usual elimination rule seems first to have been
 indicated by Howard 1969, whose proposed axioms are special cases of
 the existential elimination rule of the present theory. Furthermore,
 there is a reflection principle which links the generation of objects
 and types and plays somewhat the same role for the present theory as
 does the replacement axiom for Zermelo-Fraenkel set theory.

 An earlier, not yet conclusive, attempt at formulating a theory of
 this kind was made by Scott 1970. Also related, although less closely,
 are the type and logic free theories of constructions of Kreisel 1962
 and 1965 and Goodman 1970.

 In its first version, the present theory was based on the strongly
 impredicative axiom that there is a type of all types whatsoever,
 which is at the same time a type and an object of that type. This
 axiom had to be abandoned, however, after it was shown to lead to a
 contradiction by Jean-Yves Girard. I am very grateful to him for
 showing me his paradox. The change that it necessitated is so drastic
 that my theory no longer contains intuitionistic simple type theory as
 it originally did. Instead, its proof theoretic strength should be
 close to that of predicative analysis.",
 paper = "Mart73.pdf"
}

\end{chunk}

\index{Bates, Joseph L.}
\index{Constable, Robert L.}
\begin{chunk}{axiom.bib}
@article{Bate85,
 author = "Bates, Joseph L. and Constable, Robert L.",
 title = {{Proofs as Programs}},
 journal = "ACM TOPLAS",
 volume = "7",
 number = "1",
 pages = "113--136",
 year = "1985",
 abstract =
 "The significant intellectual cost of programming is for problem
 solving and explaining, not for coding. Yet programming systems offer
 mechanical assistance for the coding process exclusively. We
 illustrate the use of an implemented program development system,
 called PRL ('pearl'), that provides automated assistance with the
 difficult part. The problem and its explained solution are seen as
 formal objects in a constructive logic of the data domains. These
 formal explanations can be executed at various stages of
 completion. The most incomplete explanations resemble applicative
 programs, the most complete are formal proofs.",
 paper = "Bate85.pdf"
}

\end{chunk}

\index{Coquand, Thierry}
\index{Huet, Gerard}
\begin{chunk}{axiom.bib}
@article{Coqu85,
 author = "Coquand, Thierry and Huet, Gerard",
 title = {{Constructions: A Higher Order Proof System for Mechanizing
 Mathematics}},
 journal = "LNCS",
 volume = "203",
 pages = "151--184",
 year = "1985",
 abstract =
 "We present an extensive set of mathematical propositions and proofs
 in order to demonstrate the power of expression of the theory of
 constructions.",
 paper = "Coqu85.pdf"
}

\end{chunk}

\index{Calude, C.S.}
\index{Calude, E.}
\index{Marcus, S.}
\begin{chunk}{axiom.bib}
@techreport{Calu07,
 author = "Calude, C.S. and Calude, E. and Marcus, S.",
 title = {{Proving and Programming}},
 type = "technical report",
 institution = "Centre for Discrete Mathematics and Theoretical Computer
 Science",
 number = "CDMTCS-309",
 year = "2007",
 abstract =
 "There is a strong analogy between proving theorems in mathematics and
 writing programs in computer science. This paper is devoted to an
 analysis, from the perspective of this analogy, of proof in
 mathematics. We will argue that while the Hilbertian notion of proof
 has few chances to change, future proofs will be of various types,
 will play different roles, and their truth will be checked
 differently. Programming gives mathematics a new form of
 understanding. The computer is the driving force behind these
 changes.",
 paper = "Calu07.pdf"
}

\end{chunk}

\index{Fass, Leona F.}
\begin{chunk}{axiom.bib}
@article{Fass04,
 author = "Fass, Leona F.",
 title = {{Approximations, Anomalies and ``The Proof of Correctness Wars''}},
 journal = "ACM SIGSOFT Software Engineering Notes",
 volume = "29",
 number = "2",
 year = "2004",
 abstract =
 "We discuss approaches to establishing 'correctness' and describe the
 usefulness of logic-based model checkers for producing better
 practical system designs. While we could develop techniques for
 'constructing correctness' in our theoretical behavioral-modeling
 research, when applied to Real World processes such as software
 development only approximate correctness might be established and
 anomalous behaviors subsequently found. This we view as a positive
 outcome since resultant adaptation, or flaw detection and correction,
 may lead to improved development and designs. We find researchers
 employing model checking as a formal methods tool to develop empirical
 techniques have reached similar conclusions. Thus we cite some
 applications of model checking to generate tests and detect defects in
 such Real World processes as aviation system development,
 fault-detection systems, and security.",
 paper = "Fass04.pdf"
}

\end{chunk}

\index{Thurston, William P.}
\begin{chunk}{axiom.bib}
@article{Thur94,
 author = "Thurston, William P.",
 title = {{On Proof and Progress in Mathematics}},
 journal = "Bulletin of the American Mathematical Society",
 volume = "30",
 number = "2",
 pages = "161--177",
 year = "1994",
 paper = "Thur94.pdf"
}

\end{chunk}

\index{Hoare, C.A.R}
\begin{chunk}{axiom.bib}
@article{Hoar96,
 author = "Hoare, C.A.R",
 title = {{How did software get so reliable without proof?}},
 journal = "LNCS",
 volume = "1051",
 pages = "1--17",
 year = "1996",
 abstract =
 "By surveying current software engineering practice, this paper
 reveals that the techniques employed to achieve reliability are little
 different from those which have proved effective in all other branches
 of modern engineering: rigorous management of procedures for design
 inspection and review; quality assurance based on a wide range of
 targeted tests; continuous evolution by removal of errors from
 products already in widespread use; and defensive programming, among
 other forms of deliberate over-engineering. Formal methods and proof
 play a small direct role in large scale programming; but they do
 provide a conceptual framework and basic understanding to promote the
 best of current practice, and point directions for future
 improvement.",
 paper = "Hoar96.pdf"
}

\end{chunk}

\index{Glass, Robert L.}
\begin{chunk}{axiom.bib}
@article{Glas02,
 author = "Glass, Robert L.",
 title = {{The Proof of Correctness Wars}},
 journal = "Communications of the ACM",
 volume = "45",
 number = "8",
 year = "2002",
 pages = "19--21",
 paper = "Glas02.pdf"
}

\end{chunk}

\index{Fetzer, James H.}
\begin{chunk}{axiom.bib}
@article{Fetz88,
 author = "Fetzer, James H.",
 title = {{Program Verification: The Very Idea}},
 journal = "Communications of the ACM",
 volume = "31",
 number = "9",
 pages = "1048--1063",
 year = "1988",
 abstract =
 "The notion of program verification appears to trade upon an
 equivocation. Algorithms, as logical structures, are appropriate
 subjects for deductive verification. Programs, as causal models of
 those structures, are not. The success of program verification as a
 generally applicable and completely reliable method for guaranteeing
 program performance is not even a theoretical possibility.",
 paper = "Fetz88.pdf"
}

\end{chunk}

\index{Manna, Zohar}
\begin{chunk}{axiom.bib}
@book{Mann80,
 author = "Manna, Zohar",
 title = {{Lectures on the Logic of Computer Programming}},
 publisher = "Society for Industrial and Applied Mathematics",
 year = "1980",
 paper = "Mann80.tgz"
}

\end{chunk}

\index{Manna, Zohar}
\index{Waldinger, Richard}
\begin{chunk}{axiom.bib}
@article{Mann78,
 author = "Manna, Zohar and Waldinger, Richard",
 title = {{The Logic of Computer Programming}},
 journal = "IEEE Trans. on Software Engineering",
 volume = "4",
 number = "3",
 year = "1978",
 abstract =
 "Techniques derived from mathematical logic promise to provide an
 alternative to the conventional methodology for constructing,
 debugging, and optimizing computer programs. Ultimately, these
 techniques are intended to lead to the automation of many of the
 facets of the programming process.

 This paper provides a unified tutorial exposition of the logical
 techniques, illustrating each with examples. The strengths and
 limitations of each technique as a practical programming aid are
 assessed and attempts to implement these methods in experimental
 systems are discussed.",
 paper = "Mann78.pdf"
}

\end{chunk}

\index{Hall, Anthony}
\begin{chunk}{axiom.bib}
@article{Hall90,
 author = "Hall, Anthony",
 title = {{7 Myths of Formal Methods}},
 journal = "IEEE Software",
 volume = "7",
 number = "5",
 pages = "11--19",
 year = "1990",
 abstract =
 "Formal methods are difficult, expensive, and not widely useful,
 detractors say. Using a case study and other real-world examples, this
 article challenges such common myths.",
 paper = "Hall90.pdf"
}

\end{chunk}

\index{Dijkstra, Edsger}
\begin{chunk}{axiom.bib}
@misc{Dijk83,
 author = {Dijkstra, Edsger},
 title  = {{Fruits of Misunderstanding}},
 link   = {\url{https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD854.html}},
 year   = {1983},
 paper  = {Dijk83.txt}
}

\end{chunk}

\index{Bradley, Aaron R.}
\index{Manna, Zohar}
\begin{chunk}{axiom.bib}
@book{Brad07,
 author = "Bradley, Aaron R. and Manna, Zohar",
 title = {{The Calculus of Computation}},
 publisher = "Springer",
 year = "2007",
 isbn = "978-3-540-74112-1"
}

\end{chunk}

\index{Reid, Alastair}
\begin{chunk}{axiom.bib}
@misc{Reid17,
 author = "Reid, Alastair",
 title = {{How can you trust formally verified software}},
 link = "\url{https://media.ccc.de/v/34c3-8915-how_can_you_trust_formally_verified_software#t=12}",
 year = "2017",
 abstract =
 "Formal verification of software has finally started to become viable:
 we have examples of formally verified microkernels, realistic
 compilers, hypervisors, etc. These are huge achievements and we can
 expect to see even more impressive results in the future but the
 correctness proofs depend on a number of assumptions about the Trusted
 Computing Base that the software depends on. Two key questions to ask
 are: Are the specifications of the Trusted Computing Base correct? And
 do the implementations match the specifications? I will explore the
 philosophical challenges and practical steps you can take in answering
 that question for one of the major dependencies: the hardware your
 software runs on. I will describe the combination of formal
 verification and testing that ARM uses to verify the processor
 specification and I will talk about our current challenge: getting the
 specification down to zero bugs while the architecture continues to
 evolve."
}

\end{chunk}

\index{Chlipala, Adam}
\begin{chunk}{axiom.bib}
@misc{Chli17a,
 author = "Chlipala, Adam",
 title = {{Coming Soon: Machine-Checked Mathematical Proofs in Everyday
 Software and Hardware Development}},
 link = "\url{https://media.ccc.de/v/34c3-9105-coming_soon_machine-checked_mathematical_proofs_in_everyday_software_and_hardware_development}",
 year = "2017",
 abstract =
 "Most working engineers view machine-checked mathematical proofs as an
 academic curiosity, if they have ever heard of the concept at all. In
 contrast, activities like testing, debugging, and code review are
 accepted as essential. They are woven into the lives of nearly all
 developers. In this talk, I will explain how I see machine-checked
 proofs enabling new everyday activities for developers of computer
 software and hardware. These activities have the potential to lower
 development effort dramatically, at the same time as they increase our
 assurance that systems behave correctly and securely. I will give a
 cosmological overview of this field, answering the FAQs that seem to
 stand in the way of practicality; and I will illustrate the principles
 with examples from projects that you can clone from GitHub today,
 covering the computing stack from digital hardware design to
 cryptographic software and applications.

 Today's developers of computer software and hardware are tremendously
 effective, compared to their predecessors. We have found very
 effective ways of modularizing and validating our work. The talk is
 about ammunition for these activities from a perhaps-unexpected
 source.

 Modularity involves breaking a complex system into a hierarchy of
 simpler pieces, which may be written and understood
 separately. Structured programming (e.g., using loops and conditionals
 instead of gotos) helps us read and understand parts of a single
 function in isolation, and data abstraction lets us encapsulate
 important functionality in objects, with guarantees that other code
 can only access the private data by calling public methods. That way,
 we can convince ourselves that the encapsulated code upholds certain
 essential properties, regardless of which other code it is linked
 with. Systematic unit testing also helps enforce contracts for units
 of modularity. Each of these techniques can be rerun automatically, to
 catch regressions in evolving systems, and catch those regressions in
 a way that accurately points the finger of responsibility to
 particular modules.

 Validation is an important part of development that encompasses
 testing, debugging, code review, and anything else that we do to raise
 our confidence that the system behaves as intended. Experienced
 engineers know that validation tends to take up the majority of
 engineering effort. Often that effort involves mentally taxing
 activities that would not otherwise come up in coding. One example is
 thinking about test-case coverage, and another is including
 instrumentation that produces traces to consult during debugging.

 It is not hard for working developers to imagine great productivity
 gains from better ways to break systems into pieces or raise our
 confidence in those pieces. The claim I will make in this talk is that
 a key source of such insights has been neglected: machine-checked
 mathematical proofs. Here the basic functionality is an ASCII language
 for defining mathematical objects, stating theorems about them, and
 giving proofs of theorems. Crucially, an algorithm checks that
 purported proofs really do establish the theorems. By going about
 these activities in the style of programming, we inherit usual
 supporting tools like IDEs, version control, continuous integration,
 and automated build processes. But how could so esoteric a task as
 math proofs call for that kind of tooling, and what does it have to do
 with building real computer systems?

 I will explain a shared vision to that end, developed along with many
 other members of my research community. Let me try to convince you
 that all of the following goals are attainable in the next 10 years.

 \begin{itemize}
 \item We will have complete computer systems implementing moderately
 complex network servers for popular protocols, proved to implement
 those protocols correctly, from the level of digital circuits on
 up. We will remove all deployed code (hardware or software) from the
 trusted computing base, shifting our trust to much smaller
 specifications and proof checkers.
 \item Hobbyists will be able to design new embedded computing
 platforms by mixing and matching open-source hardware and software
 components, also mixing and matching the proofs of these components,
 guaranteeing no bugs at the digital-abstraction level or higher, with
 no need for debugging.
 \item New styles of library design will be enabled by the chance to
 attach a formal behavioral specification to each library. For
 instance, rank-and-file programmers will be able to assemble their own
 code for cryptographic protocols, with code that looks like reference
 implementations in Python, but getting performance comparable to what
 experts handcraft in assembly today. Yet that benefit would come with
 no need to trust that library authors have avoided bugs or intentional
 backdoors, perhaps even including automatic proofs of cryptographic
 security properties.
 \end{itemize}

 Main technical topics to cover to explain my optimism:
 \begin{itemize}
 \item The basic functionality of proof assistants and why we should
 trust their conclusions
 \item How to think about system decomposition with specifications and
 proofs, including why, for most components, we do not need to worry
 about specification mistakes
 \item The different modes of applying proof technology to check or
 generate components
 \item The engineering techniques behind cost-effective proof authoring
 for realistic systems
 \item A hardware case study: Kami, supporting component-based digital
 hardware authoring with proofs
 \item A software case study: Fiat Cryptography, supporting
 correct-by-construction auto-generation of fast code for
 elliptic-curve cryptography
 \item Pointers to where to look next, if you would like to learn more
 about this technology
 \end{itemize}"
}

\end{chunk}

\index{Wilkes, Maurice}
\begin{chunk}{axiom.bib}
@book{Wilk85,
 author    = {Wilkes, Maurice},
 title     = {{Memoirs of a Computer Pioneer}},
 publisher = {MIT Press},
 year      = {1985}
}

\end{chunk}

\index{Appel, Andrew W.}
\begin{chunk}{axiom.bib}
@book{Appe17,
 author = "Appel, Andrew W.",
 title = {{Verified Functional Algorithms}},
 year = "2017",
 publisher = "University of Pennsylvania",
 link =
 "\url{https://softwarefoundations.cis.upenn.edu/vfa-current/index.html}"
}

\end{chunk}
diff --git a/src/axiomwebsite/index.html b/src/axiomwebsite/index.html
index bc86346..9b4d2eb 100644
--- a/src/axiomwebsite/index.html
+++ b/src/axiomwebsite/index.html
@@ -177,6 +177,19 @@
+ The effort to prove Axiom correct is being supported by Carnegie Mellon
+ University. In particular, we gratefully acknowledge support by
+
+
+ Jeremy Avigad CMU Professor of Philosophy and Mathematical Sciences
+ Frank Pfenning CMU Professor (Dept. Head)
+ Robert Harper CMU Professor
+ Karl Crary CMU Associate Professor
+ Klaus Sutner CMU Teaching Professor
+ Andre Platzer CMU Associate Professor
+
+
+
Axiom development was partially supported by
CAISS,
the Center for Algorithms and Interactive Scientific Software.
@@ -201,7 +214,7 @@
width="88" height="31" border="0" alt="SourceForge.net Logo" />
-Last Site Update: January 2015
+ Last Site Update: January 2018
diff --git a/src/axiomwebsite/patches.html b/src/axiomwebsite/patches.html
index df00da9..04ab98d 100644
--- a/src/axiomwebsite/patches.html
+++ b/src/axiomwebsite/patches.html
@@ -5878,6 +5878,8 @@ books/bookvol5 fix typos
books/bookvolbib add references
20171230.01.tpd.patch
books/bookvol13 next draft
+20171230.02.tpd.patch
+src/axiomwebsite/index.html Acknowledge CMU support

1.9.1