diff --git a/books/bookvolbib.pamphlet b/books/bookvolbib.pamphlet
index 600363f..a1f77c6 100644
--- a/books/bookvolbib.pamphlet
+++ b/books/bookvolbib.pamphlet
@@ -13,6 +13,7 @@ The third section sorts papers by topic.
\section{Linear Algebra} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Kaltofen, Erich}
\begin{chunk}{axiom.bib}
@Unpublished{Kalt01,
author = "Kaltofen, E.",
@@ -20,11 +21,34 @@ The third section sorts papers by topic.
over finite fields (Invited talk)",
year = "2001",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/01/Ka01_Fq6.pdf",
- paper = "Kalt01.pdf"
+ paper = "Kalt01.pdf",
+ keywords = "survey",
+ abstract = "
+ Sparse and structured matrices over finite fields occur in many
+ settings. Sparse linear systems arise in sieve-based integer factoring
+ and discrete logarithm algorithms. Structured matrices arise in
+ polynomial factoring algorithms; one example is the famous Q-matrix
+ from Berlekamp's method. Sparse diophantine linear problems, like
+ computing the Smith canonical form of an integer matrix or computing
+ an integer solution to a sparse linear system, are reduced via p-adic
+ lifting to sparse matrix analysis over a finite field.
+
+ In the past 10 years there has been substantial activity on the
+ improvement of a solution proposed by Wiedemann in 1986. The main new
+ ingredients are faster preconditioners, projections by an entire
+ block of random vectors, Lanczos recurrences, and a connection to
+ Kalman realizations of control theory. My talk surveys these
+ developments and describe some major unresolved problems."
}
\end{chunk}
+\index{Chen, L.}
+\index{Eberly, W.}
+\index{Kaltofen, Erich}
+\index{Saunders, B. David}
+\index{Turner, W. J.}
+\index{Villard, Gilles}
\begin{chunk}{axiom.bib}
@Article{Chen02,
author = "Chen, L. and Eberly, W. and Kaltofen, E.
@@ -35,11 +59,30 @@ The third section sorts papers by topic.
volume = "343--344",
pages = "119--146",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/02/CEKSTV02.pdf",
- paper = "Chen02.pdf"
+ paper = "Chen02.pdf",
+ abstract = "
+ The main idea of the ``black box'' approach in exact linear algebra is
+ to reduce matrix problems to the computation of minimum polynomials.
+ In most cases preconditioning is necessary to obtain the desired
+ result. Here good preconditioners will be used to ensure geometrical
+ / algebraic properties on matrices, rather than numerical ones, so we
+ do not address a condition number. We offer a review of problems for
+ which (algebraic) preconditioning is used, provide a bestiary of
+ preconditioning problems, and discuss several preconditioner types to
+ solve these problems. We present new conditioners, including
+ conditioners to preserve low displacement rank for Toeplitz-like
+ matrices. We also provide new analyses of preconditioner performance
+ and results on the relations among preconditioning problems and with
+ linear algebra problems. Thus, improvements are offered for the
+ efficiency and applicability of preconditioners. The focus is on
+ linear algebra problems over finite fields, but most results are valid
+ for entries from arbitrary fields."
}
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Storjohann, Arne}
\begin{chunk}{axiom.bib}
@InCollection{Kalt11d,
author = "Kaltofen, Erich and Storjohann, Arne",
@@ -48,11 +91,24 @@ The third section sorts papers by topic.
crossref = "EACM",
year = "2011",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/11/KS11.pdf",
- paper = "Kalt11d.pdf"
+ paper = "Kalt11d.pdf",
+ abstract = "
+ Computational problems in exact linear algebra including computing an
+ exact solution of a system of linear equations with exact scalars,
+ which can be exact rational numbers, integers modulo a prime number,
+ or algebraic extensions of those represented by their residues modulo
+ a minimum polynomial. Classical linear algebra problems are computing
+ for a matrix its rank, determinant, characteristic and minimal
+ polynomial, and rational canonical form (= Frobenius normal form). For
+ matrices with integer and polynomial entries one computes the Hermite
+ and Smith normal forms. If a rational matrix is symmetric, one
+ determines if the matrix is definite."
}
\end{chunk}
+\index{Comer, Matthew T.}
+\index{Kaltofen, Erich}
\begin{chunk}{axiom.bib}
@Article{Come12,
author = "Comer, Matthew T. and Kaltofen, Erich L.",
@@ -65,11 +121,27 @@ The third section sorts papers by topic.
number = "4",
pages = "480--491",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/10/CoKa10.pdf",
- paper = "Come12.pdf"
+ paper = "Come12.pdf",
+ abstract = "
+ We derive an explicit count for the number of singular $n\times n$
+ Hankel (Toeplitz) matrices whose entries range over a finite field
+ with $q$ elements by observing the execution of the Berlekamp / Massey
+ algorithm on its elements. Our method yields explicit counts also when
+ some entries above or on the antidiagonal (diagonal) are fixed. For
+ example, the number of singular $n\times n$ Toeplitz matrices with 0's
+ on the diagonal is $q^{2n-3}+q^{n-1}-q^{n-2}$.
+
+ We also derive the count for all $n\times n$ Hankel matrices of rank
+ $r$ with generic rank profile, i.e., whose first $r$ leading principal
+ submatrices are nonsingular and the rest are singular, namely
+ $q^r(q-1)^r$ in the case $r < n$ and $q^{r-1}(q-1)^r$ in the case
+ $r=n$. This result generalizes to block-Hankel matrices as well."
}
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Yuhasz, George}
\begin{chunk}{axiom.bib}
@Article{Kalt13a,
author = "Kaltofen, Erich and Yuhasz, George",
@@ -81,11 +153,23 @@ The third section sorts papers by topic.
month = "November",
pages = "2515--2526",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/08/KaYu08.pdf",
- paper = "Kalt13a.pdf"
+ paper = "Kalt13a.pdf",
+ abstract = "
+ We describe a fraction free version of the Matrix Berlekamp / Massey
+ algorithm. The algorithm computes a minimal matrix generator of
+ linearly generated square matrix sequences over an integral
+ domain. The algorithm performs all operations in the integral domain,
+ so all divisions performed are exact. For scalar sequences, the matrix
+ algorithm specializes to a more efficient algorithm than the algorithm
+ currently in the literature. The proof of integrality of the matrix
+ algorithm gives a new proof of integrality for the scalar
+ specialization."
}
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Yuhasz, George}
\begin{chunk}{axiom.bib}
@Article{Kalt13,
author = "Kaltofen, Erich and Yuhasz, George",
@@ -96,11 +180,23 @@ The third section sorts papers by topic.
month = "September",
journal = "ACM Trans. Algorithms",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/06/KaYu06.pdf",
- paper = "Kalt13.pdf"
+ paper = "Kalt13.pdf",
+ abstract = "
+ We analyze the Matrix Berlekamp / Massey algorithm, which generalizes
+ the Berlekamp / Massey algorithm [Massey 1969] for computing linear
+ generators of scalar sequences. The Matrix Berlekamp / Massey
+ algorithm computes a minimal matrix generator of a linearly generated
+ matrix sequence and has been first introduced by Rissanen [1972a],
+ Dickinson et al. [1974], and Coppersmith [1994]. Our version of the
+ algorithm makes no restrictions on the rank and dimensions of the
+ matrix sequence. We also give new proofs of correctness and complexity
+ for the algorithm, which is based on self-contained loop invariants
+ and includes an explicit termination criterion for a given
+ determinantal degree bound of the minimal matrix generator."
}

\end{chunk}
+\index{Kaltofen, Erich}
\begin{chunk}{axiom.bib}
@InProceedings{Kalt02a,
author = "Kaltofen, Erich",
@@ -116,6 +212,8 @@ The third section sorts papers by topic.
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Villard, Gilles}
\begin{chunk}{axiom.bib}
@InProceedings{Kalt01a,
author = "Kaltofen, E. and Villard, G.",
@@ -127,11 +225,29 @@ The third section sorts papers by topic.
isbn = "981-02-4763-X",
year = "2001",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/01/KaVi01.pdf",
- paper = "Kalt01a.pdf"
+ paper = "Kalt01a.pdf",
+ abstract = "
+ The computation of the determinant of an $n\times n$ matrix $A$ of
+ numbers or polynomials is a challenge for both numerical and symbolic
+ methods. Numerical methods, such as Clarkson's algorithm [10,7] for
+ the sign of the determinant must deal with conditionedness that
+ determines the number of mantissa bits necessary for obtaining a
+ correct sign. Symbolic algorithms that are based on Chinese
+ remaindering [6,17,Chapter 5.5] must deal with the fact that the
+ length of the determinant in the worst case grows linearly in the
+ dimension of the matrix. Hence the number of modular operations is $n$
+ times the number of arithmetic operations in a given algorithm.
+ Hensel lifting combined with rational number recovery [14,1] has cubic
+ bit complexity in $n$, but the algorithm can only determine a factor
+ of the determinant, namely the largest invariant factor. If the matrix
+ is similar to a multiple of the identity matrix, the running time is
+ again that of Chinese remaindering."
}
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Villard, Gilles}
\begin{chunk}{axiom.bib}
@Article{Kalt04a,
author = "Kaltofen, Erich and Villard, Gilles",
@@ -148,6 +264,8 @@ The third section sorts papers by topic.
\end{chunk}
+\index{Eberly, W.}
+\index{Kaltofen, Erich}
\begin{chunk}{axiom.bib}
@InProceedings{Kalt97b,
author = "Eberly, W. and Kaltofen, E.",
@@ -157,14 +275,62 @@ The third section sorts papers by topic.
crossref = "ISSAC97",
pages = "176--183",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/97/EbKa97.pdf",
- paper = "Kalt97b.pdf"
-
-}
-
-\end{chunk}
-
-\begin{chunk}{axiom.bib}
-@InProceedings{Kalt94b,
+ paper = "Kalt97b.pdf",
+ abstract = "
+ Las Vegas algorithms that are based on Lanczo's method for solving
+ symmetric linear systems are presented and analyzed. These are
+ compared to a similar randomized Lanczos algorithm that has been used
+ for integer factorization, and to the (provably reliable) algorithm of
+ Wiedemann. The analysis suggests that our Lanczos algorithms are
+ preferable to several versions of Wiedemann's method for computations
+ over large fields, especially for certain symmetric matrix
+ computations."
+}
+
+\end{chunk}
+
+The Sylvester matrix is used to compute the {\bf resultant} of two
+polynomials. The Sylvester matrix is formed from the coefficients of
+the two polynomials. Given a polynomial with degree $m$ and another of
+degree $n$ form an $(m+n)\times(m+n)$ matrix by filling the matrix
+from the upper left corner with the coefficients of the first
+polynomial then shifting down one row and one column to the right and
+filling in the coefficients starting there until they hit the right
+column. Starting at the next row, do the same process for the second
+polynomial. The determinant of this matrix is the {\bf resultant} of
+the two polynomials.
+
+For example, given $a_3x^3+a_2x^2+a_1x+a_0$ and $b_2x^2+b_1x+b_0$
+the Sylvester matrix is a $(3+2)\times(3+2)$ matrix:
+\[\left[\begin{array}{ccccc}
+a_3 & a_2 & a_1 & a_0 & 0\\
+0 & a_3 & a_2 & a_1 & a_0\\
+b_2 & b_1 & b_0 & 0 & 0\\
+0 & b_2 & b_1 & b_0 & 0\\
+0 & 0 & b_2 & b_1 & b_0
+\end{array}\right]\]
+
+The resultant of these two polynomials (assuming a leading coefficient
+of 1), is the product of the differences $p_i-q_i$ between the roots
+of the polynomials. If there are roots in common then the product
+will contain a 0 and the whole equation reduces to 0. This can be
+used to determine if two polynomials have common roots.
+
+For example, given a polynomial in $x$ with distinct roots $a_1$
+and $a_2$ it can be factored as $t1:=(x-a_1)(x-a_2)$.
+
+Given a second polynomial in $x$ with distinct roots $b_1$, $b_2$,
+and $b_3$ it can be factored as $t2:=(x-b_1)(x-b_2)(x-b_3)$.
+
+The Axiom call of $resultant(t1,t2,x)$ is
+\[ (b_1-a_2)(b_1-a_1)(b_2-a_2)(b_2-a_1)(b_3-a_2)(b_3-a_1) \]
+
+In symbolic form the resultant can show the multiplicity of roots
+when shown in factored form.
+
+\index{Kaltofen, Erich}
+\begin{chunk}{axiom.bib}
+@InProceedings{Kalt94c,
author = "Kaltofen, E.",
title = "Asymptotically fast solution of {Toeplitz}-like singular
linear systems",
@@ -173,14 +339,30 @@ The third section sorts papers by topic.
crossref = "ISSAC94",
year = "1994",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/94/Ka94_issac.pdf",
- paper = "Kalt94b.pdf"
+ paper = "Kalt94c.pdf",
+ abstract = "
+ The Toeplitz-likeness of a matrix (Kailath et al. 1979) is the
+ generalization of the notion that a matrix is Toeplitz. Block matrices
+ with Toeplitz blocks, such as the Sylvester matrix corresponding to
+ the resultant of two univariate polynomials, are Toeplitz-like, as are
+ products and inverses of Toeplitz-like matrices. The displacement rank
+ of a matrix is a measure for the degree of being Toeplitz-like. For
+ example, an $r\times s$ block matrix with Toeplitz blocks has
+ displacement rank $r+s$ whereas a generic $N\times N$ matrix has
+ displacement rank $N$. A matrix of displacement rank $\alpha$ can be
+ implicitly represented by a sum of $\alpha$ matrices, each of which is
+ the product of a lower triangular and an upper triangular Toeplitz
+ matrices. Such a $\Sigma LU$ representation can usually be obtained
+ efficiently."
}
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Lobo, A.}
\begin{chunk}{axiom.bib}
@Article{Kalt99,
- author = "Kaltofen, E. and Lobo, A",
+ author = "Kaltofen, E. and Lobo, A.",
title = "Distributed matrix-free solution of large sparse linear systems over
finite fields",
journal = "Algorithmica",
@@ -190,11 +372,27 @@ The third section sorts papers by topic.
volume = "24",
number = "3--4",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/99/KaLo99.pdf",
- paper = "Kalt99.pdf"
+ paper = "Kalt99.pdf",
+ abstract = "
+ We describe a coarse-grain parallel approach for the homogeneous
+ solution of linear systems. Our solutions are symbolic, i.e., exact
+ rather than numerical approximations. We have performed an outer loop
+ parallelization that works well in conjunction with a black box
+ abstraction for the coefficient matrix. Our implementation can be run
+ on a network cluster of UNIX workstations as well as on an SP2
+ multiprocessor. Task distribution and management are effected through
+ MPI and other packages. Fault tolerance, checkpointing, and recovery
+ are incorporated. Detailed timings are presented for experiments with
+ systems that arise in RSA challenge integer factoring efforts. For
+ example, we can solve a 252,222$\times$252,222 system with about 11.04
+ million nonzero entries over the Galois field with two elements using
+ four processors of an SP2 multiprocessor, in about 26.5 hours CPU time."
}
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Lobo, A.}
\begin{chunk}{axiom.bib}
@InProceedings{Kalt96a,
author = "Kaltofen, E. and Lobo, A.",
@@ -208,11 +406,23 @@ The third section sorts papers by topic.
publisher = "Simulation Councils, Inc.",
address = "San Diego, CA",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/96/KaLo96_hpc.pdf",
- paper = "Kalt96a.pdf"
+ paper = "Kalt96a.pdf",
+ abstract = "
+ We describe a coarse-grain parallel software system for the
+ homogeneous solution of linear systems. Our solutions are symbolic,
+ i.e., exact rather than numerical approximations. Our implementation
+ can be run on a network cluster of SPARC-20 computers and on an SP2
+ multiprocessor. Detailed timings are presented for experiments with
+ systems that arise in RSA challenge integer factoring efforts. For
+ example, we can solve a 252,222$\times$252,222 system with about 11.04
+ million nonzero entries over the Galois field with 2 elements using 4
+ processors of an SP2 multiprocessor, in about 26.5 hours CPU time."
}
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Lobo, A.}
\begin{chunk}{axiom.bib}
@InProceedings{Kalt94a,
author = "Kaltofen, E. and Lobo, A.",
@@ -223,11 +433,22 @@ The third section sorts papers by topic.
pages = "90--98",
year = "1994",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/94/KaLo94.ps.gz",
- paper = "Kalt94a.ps"
+ paper = "Kalt94a.ps",
+ abstract = "
+ Modern techniques for solving structured linear systems over finite
+ fields, which use the coefficient matrix as a black box and require an
+ efficient algorithm for multiplying this matrix by a vector, are
+ applicable to the classical algorithm for factoring a univariate
+ polynomial over a finite field by Berlekamp (1967 and 1970). We report
+ on a computer implementation of this idea that is based on the
+ parallel block Wiedemann linear system solver (Coppersmith 1994 and
+ Kaltofen 1993 and 1995). The program uses randomization and we also
+ study the expected run time behavior of our method."
}
\end{chunk}
+\index{Kaltofen, Erich}
\begin{chunk}{axiom.bib}
@Article{Kalt95,
author = "Kaltofen, E.",
@@ -239,11 +460,26 @@ The third section sorts papers by topic.
number = "210",
pages = "777--806",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/95/Ka95_mathcomp.pdf",
- paper = "Kalt95.pdf"
+ paper = "Kalt95.pdf",
+ abstract = "
+ By using projections by a block of vectors in place of a single vector
+ it is possible to parallelize the outer loop of iterative methods for
+ solving sparse linear systems. We analyze such a scheme proposed by
+ Coppersmith for Wiedemann's coordinate recurrence algorithm, which is
+ based in part on the Krylov subspace approach. We prove that by use of
+ certain randomizations on the input system the parallel speed up is
+ roughly by the number of vectors in the blocks when using as many
+ processors. Our analysis is valid for fields of entries that have
+ sufficiently large cardinality. Our analysis also deals with an
+ arising subproblem of solving a singular block Toeplitz system by use
+ of the theory of Toeplitz-like matrices."
}
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Krishnamoorthy, M.S.}
+\index{Saunders, B. David}
\begin{chunk}{axiom.bib}
@Article{Kalt90a,
author = "Kaltofen, E. and Krishnamoorthy, M.S. and Saunders, B.D.",
@@ -253,11 +489,30 @@ The third section sorts papers by topic.
volume = "136",
pages = "189--208",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/90/KKS90.pdf",
- paper = "Kalt90a.pdf"
+ paper = "Kalt90a.pdf",
+ abstract = "
+ Here we offer a new randomized parallel algorithm that determines the
+ Smith normal form of a matrix with entries being univariate
+ polynomials with coefficients in an arbitrary field. The algorithm has
+ two important advantages over our previous one: the multipliers
+ relating the Smith form to the input matrix are computed, and the
+ algorithm is probabilistic of Las Vegas type, i.e., always finds the
+ correct answer. The Smith form algorithm is also a good sequential
+ algorithm. Our algorithm reduces the problem of Smith form
+ computations to two Hermite form computations. Thus the Smith form
+ problem has complexity asymptotically that of the Hermite form
+ problem. We also construct fast parallel algorithms for Jordan normal
+ form and testing similarity of matrices. Both the similarity and
+ nonsimilarity problems are in the complexity class RNC for the usual
+ coefficient fields, i.e., they can be probabilistically decided in
+ polylogarithmic time using polynomially many processors."
}
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Krishnamoorthy, M.S.}
+\index{Saunders, B. David}
\begin{chunk}{axiom.bib}
@Article{Kalt87,
author = "Kaltofen, E. and Krishnamoorthy, M.S. and Saunders, B.D.",
@@ -268,11 +523,23 @@ The third section sorts papers by topic.
volume = "8",
pages = "683--690",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/87/KKS87.pdf",
- paper = "Kalt87.pdf"
+ paper = "Kalt87.pdf",
+ abstract = "
+ Boolean circuits of polynomial size and polylogarithmic depth are
+ given for computing the Hermite and Smith normal forms of polynomial
+ matrices over finite fields and the field of rational numbers. The
+ circuits for the Smith normal form computation are probabilistic ones
+ and also determine very efficient sequential algorithms. Furthermore,
+ we give a polynomialtime deterministic sequential algorithm for the
+ Smith normal form over the rationals. The Smith normal form algorithms
+ are applied to the Rational canonical form of matrices over finite
+ fields and the field of rational numbers."
}
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Pan, V.}
\begin{chunk}{axiom.bib}
@InProceedings{Kalt92,
author = "Kaltofen, E. and Pan, V.",
@@ -284,11 +551,24 @@ The third section sorts papers by topic.
publisher = "IEEE Computer Society Press",
address = "Los Alamitos, California",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/92/KaPa92.pdf",
- paper = "Kalt92.pdf"
+ paper = "Kalt92.pdf",
+ abstract = "
+ We show that over any field, the solution set to a system of $n$
+ linear equations in $n$ unknowns can be computed in parallel with
+ randomization simultaneously in polylogarithmic time in $n$ and with
+ only as many processors as are utilized to multiply two $n\times n$
+ matrices. A time unit represents an arithmetic operation in the
+ field. For singular systems our parallel timings are asymptotically as
+ fast as those for nonsingular systems, due to our avoidance of binary
+ search in the matrix rank problem, except when the field has small
+ positive characteristic; in that case, binary search is avoided to a
+ somewhat higher processor count measure."
}
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Pan, V.}
\begin{chunk}{axiom.bib}
@InProceedings{Kalt91c,
author = "Kaltofen, E. and Pan, V.",
@@ -300,11 +580,25 @@ The third section sorts papers by topic.
year = "1991",
address = "New York, N.Y.",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/91/KaPa91.pdf",
- paper = "Kalt91c.pdf"
+ paper = "Kalt91c.pdf",
+ abstract = "
+ Parallel randomized algorithms are presented that solve
+ $n$dimensional systems of linear equations and compute inverses of
+ $n\times n$ nonsingular matrices over a field in $O((log n)^2)$ time,
+ where each time unit represents an arithmetic operation in the field
+ generated by the matrix entries. The algorithms utilize with a $O(log n)$
+ factor as many processors as are needed to multiply two $n\times n$
+ matrices. The algorithms avoid zero divisions with controllably
+ high probability provided the $O(n)$ random elements used are selected
+ uniformly from a sufficiently large set. For fields of small positive
+ characteristics, the processor count measures of our solutions are
+ somewhat higher."
}
\end{chunk}
+\index{Kaltofen, Erich}
+\index{Saunders, B. David}
\begin{chunk}{axiom.bib}
@InProceedings{Kalt91,
author = "Kaltofen, E. and Saunders, B.D.",
@@ -317,13 +611,39 @@ The third section sorts papers by topic.
publisher = "Springer-Verlag",
year = "1991",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/91/KaSa91.pdf",
- paper = "Kalt91.pdf"
+ paper = "Kalt91.pdf",
+ abstract = "
+ Douglas Wiedemann's (1986) landmark approach to solving sparse linear
+ systems over finite fields provides the symbolic counterpart to
+ noncombinatorial numerical methods for solving sparse linear systems,
+ such as the Lanczos or conjugate gradient method (see Golub and van
+ Loan (1983)). The problem is to solve a sparse linear system, when the
+ individual entries lie in a generic field, and the only operations
+ possible are field arithmetic; the solution is to be exact. Such is
+ the situation, for instance, if one works in a finite field. Wiedemann
+ bases his approach on Krylov subspaces, but projects further to a
+ sequence of individual field elements. By making a link to the
+ Berlekamp / Massey problem from coding theory --- the coordinate
+ recurrences --- and by using randomization an algorithm is obtained
+ with the following property. On input of an $n\times n$ coefficient
+ matrix $A$ given by a so-called black box, which is a program that can
+ multiply the matrix by a vector (see Figure 1), and of a vector $b$,
+ the algorithm finds, with high probability in case the system is
+ solvable, a random solution vector $x$ with $Ax=b$. It is assumed that
+ the field has sufficiently many elements, say no less than $50n^2
+ log(x)$, otherwise one goes to a finite algebraic extension. The
+ complexity of the method is in the general singular case $O(n log
+ (n))$ calls to the black box for $A$ and an additional $O(n^2
+ log(n)^2)$ field arithmetic operations."
}
\end{chunk}
\section{Algebraic Algorithms} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Diaz, A.}
+\index{Kaltofen, Erich}
+\index{Pan, V.}
\begin{chunk}{axiom.bib}
@InCollection{Diaz97,
author = "Diaz, A. and Kaltofen, E. and Pan, V.",
@@ -337,11 +657,29 @@ The third section sorts papers by topic.
chapter = "10",
keywords = "survey",
url = "http://www.math.ncsu.edu/~kaltofen/bibliography/97/DKP97.ps.gz",
- paper = "Diaz97.ps"
+ paper = "Diaz97.ps",
+ abstract = "
+ The title's subject is the algorithmic approach to algebra: arithmetic
+ with numbers, polynomials, matrices, differential polynomials, such as
+ $y^{\prime\prime} + (1/2 + x^4/4)y$, truncated series,
+ and algebraic sets, i.e.,
+ quantified expressions such as $\exists x \in {\bf R}: x^4+p\cdot x+q=0$,
+ which describes a subset of the twodimensional space with
+ coordinates $p$ and $q$ for which the given quartic equation has a
+ real root. Algorithms that manipulate such objects are the backbone
+ of modern symbolic mathematics software such as the Maple and
+ Mathematica systems, to name but two among many useful systems. This
+ chapter restricts itself to algorithms in four areas: linear matrix
+ algebra, root finding of univariate polynomials, solution of systems
+ of nonlinear algebraic equations, and polynomial factorization."
}
\end{chunk}
+\index{Diaz, A.}
+\index{Emiris, I.}
+\index{Kaltofen, Erich}
+\index{Pan, V.}
\begin{chunk}{axiom.bib}
@InCollection{Diaz99,
author = "Diaz, A. and Emiris, I. and