diff -pruN 0.53.20120414-2/btool/Makefile 0.56.20180123.1-2/btool/Makefile
--- 0.53.20120414-2/btool/Makefile	2012-04-15 15:43:56.000000000 +0000
+++ 0.56.20180123.1-2/btool/Makefile	2017-05-09 11:46:22.000000000 +0000
@@ -33,16 +33,16 @@ endif
allobj: $(allobj)
$(foreach dir, $(srcdir), $(dir)/%): FORCE; $(MAKE) -C $(@D) $(@F)
%.d: %.cc; g++ -MM $< | sed -e 's/:/ $*.d:/' >$@
-%.o:; g++ $(warn) $(optim) -c $< -o $*.o
+%.o: %.cc; g++ $(warn) $(optim) $(CXXFLAGS) -c $< -o $*.o
completepdf: completepdf.o $(allobj)
-	g++ $(warn) $(optim) -lpoppler $^ -o $@
+	g++ $(warn) $(optim) $^ -lpoppler -o $@
romanize: romanize.o Util/roman_numeral.o
g++ $(warn) $(optim) $^ -o $@
a.out: test.o $(allobj)
-	g++ $(warn) $(optim) -lpoppler $^ -o $@
+	g++ $(warn) $(optim) $^ -lpoppler -o $@
cleanless:
$(foreach dir, $(srcdir), $(MAKE) -C $(dir) clean ;)
diff -pruN 0.53.20120414-2/btool/Makefile-subdir 0.56.20180123.1-2/btool/Makefile-subdir
--- 0.53.20120414-2/btool/Makefile-subdir	2012-04-15 15:43:56.000000000 +0000
+++ 0.56.20180123.1-2/btool/Makefile-subdir	2017-05-09 11:46:22.000000000 +0000
@@ -2,6 +2,9 @@
# This makefile is meant to be used only when accessed
# through a symbolic link from an immediate subdirectory.
+CXXFLAGS += -std=c++11
+CXXFLAGS += $(shell pkg-config --cflags poppler)
+
warn := -Wall -Wextra
include ../Makefile-optim
@@ -15,7 +18,7 @@ ifneq ($(strip $(filter-out $(clean), $(
include $(alld)
endif
%.d: %.cc; g++ -MM $< | sed -e 's/:/ $*.d:/' >$@
-%.o:; g++ $(warn) $(optim) -c $< -o $*.o
+%.o:; g++ $(warn) $(optim) $(CXXFLAGS) -c $< -o $*.o
cleanless:
rm -fv *.d *.o *.gch a.out
diff pruN 0.53.201204142/btool/Page_no/Makefile 0.56.20180123.12/btool/Page_no/Makefile
 0.53.201204142/btool/Page_no/Makefile 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/btool/Page_no/Makefile 20170509 11:46:22.000000000 +0000
@@ 2,6 +2,9 @@
# This makefile is meant to be used only when accessed
# through a symbolic link from an immediate subdirectory.
+CXXFLAGS += -std=c++11
+CXXFLAGS += $(shell pkg-config --cflags poppler)
+
warn := -Wall -Wextra
include ../Makefile-optim
@@ -15,7 +18,7 @@ ifneq ($(strip $(filter-out $(clean), $(
include $(alld)
endif
%.d: %.cc; g++ -MM $< | sed -e 's/:/ $*.d:/' >$@
-%.o:; g++ $(warn) $(optim) -c $< -o $*.o
+%.o:; g++ $(warn) $(optim) $(CXXFLAGS) -c $< -o $*.o
cleanless:
	rm -fv *.d *.o *.gch a.out
diff pruN 0.53.201204142/btool/PDF/Makefile 0.56.20180123.12/btool/PDF/Makefile
 0.53.201204142/btool/PDF/Makefile 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/btool/PDF/Makefile 20170509 11:46:22.000000000 +0000
@@ 2,6 +2,9 @@
# This makefile is meant to be used only when accessed
# through a symbolic link from an immediate subdirectory.
+CXXFLAGS += -std=c++11
+CXXFLAGS += $(shell pkg-config --cflags poppler)
+
warn := -Wall -Wextra
include ../Makefile-optim
@@ -15,7 +18,7 @@ ifneq ($(strip $(filter-out $(clean), $(
include $(alld)
endif
%.d: %.cc; g++ -MM $< | sed -e 's/:/ $*.d:/' >$@
-%.o:; g++ $(warn) $(optim) -c $< -o $*.o
+%.o:; g++ $(warn) $(optim) $(CXXFLAGS) -c $< -o $*.o
cleanless:
	rm -fv *.d *.o *.gch a.out
diff pruN 0.53.201204142/btool/PDF/PDF.cc 0.56.20180123.12/btool/PDF/PDF.cc
 0.53.201204142/btool/PDF/PDF.cc 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/btool/PDF/PDF.cc 20170509 16:49:16.000000000 +0000
@@ 1,14 +1,115 @@
#include "PDF.h"
#include
+#include <cctype>
+#include <cstdlib>
+#include <fstream>
#include "PDF_rep.h"
int PDF::file_length( const PDF &pdf ) {
return pdf.rep->file_length-1;
}
-int PDF::offset_last_xref_table( const PDF &pdf ) {
-	return pdf.rep->xref->getLastXRefPos();
+int PDF::offset_last_xref_table( const std::string &pdf_filename ) {
+
+ // Update in 2015, seven years after most of the rest of the file and
+ // program were written:
+ //
+ // In an earlier version of the progam, this function's body was
+ // a oneliner:
+ //
+ // return pdf.rep->xref->getLastXRefPos();
+ //
+ // That worked fine until Libpoppler changed its interface, since
+ // which it has hidden the required function in the private section
+ // of an interface, unusable here. In a later version of the
+ // program, this function's body was a different oneliner:
+ //
+ // return pdf.rep->xref->getEntry(0)->offset;
+ //
+ // This unfortunately does the wrong thing, though, with effects
+ // Salvatore Bonaccorso has noticed and brought to attention.
+ // Accordingly, this function itself must now find the position of
+ // the last XRef table, as follows.
+ //
+ // Fortunately, the PDF standard requires the position of an XRef
+ // table to be given in plain ascii, so finding the position is not
+ // too hard. One must only be sure to find the position of
+ // the *last* XRef table.
+ //
+ // The code is not quite as elegant as it might be, but the whole
+ // program needs cleaning up, so let us not worry about that for now.
+ // (The programmer's C++ style was pretty immature back in 2008 in
+ // any case.)
+ //
+ //
+
+ const char key_token[] = "startxref";
+
+ int offset = -1;
+
+ std::ifstream pdf_file(pdf_filename);
+ bool has_preceding_whitespace = true;
+ char digit_stage[] = " ";
+ int c = std::ifstream::traits_type::eof();
+ const char *p = key_token;
+
+ while (true) {
+
+ c = pdf_file.get();
+ if (c == std::ifstream::traits_type::eof()) goto done;
+
+ if (!has_preceding_whitespace || c != *p) {
+ p = key_token;
+ has_preceding_whitespace = std::isspace(c);
+ }
+
+ else {
+
+ ++p;
+
+ if (!*p) {
+
+ // Skip whitespace between key token and offset.
+ bool has_trailing_whitespace = false;
+ while (true) {
+ c = pdf_file.get();
+ if (c == std::ifstream::traits_type::eof()) goto done;
+ if (!std::isspace(c)) break;
+ has_trailing_whitespace = true;
+ }
+
+ if (has_trailing_whitespace) {
+
+ // The key token has been found, so prepare to read the offset.
+ offset = -1;
+
+ // Read the offset.
+ if (std::isdigit(c)) {
+ digit_stage[0] = c;
+ offset = std::atoi(digit_stage);
+ while (true) {
+ c = pdf_file.get();
+ if (c == std::ifstream::traits_type::eof()) goto done;
+ if (!std::isdigit(c)) break;
+ offset *= 10;
+ digit_stage[0] = c;
+ offset += std::atoi(digit_stage);
+ }
+ }
+
+ }
+
+ p = key_token;
+
+ }
+
+ }
+
+ }
+
+ done: return offset;
+
}
PDF::Iref PDF::iref_catalog( const PDF &pdf ) {
diff pruN 0.53.201204142/btool/PDF/PDF.h 0.56.20180123.12/btool/PDF/PDF.h
 0.53.201204142/btool/PDF/PDF.h 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/btool/PDF/PDF.h 20170509 11:46:22.000000000 +0000
@@ 44,7 +44,7 @@ namespace PDF {
struct PDF_rep;
class PDF;
int file_length ( const PDF &pdf );
- int offset_last_xref_table( const PDF &pdf );
+ int offset_last_xref_table( const std::string &pdf_filename );
Iref iref_catalog ( const PDF &pdf );
Iref iref_info ( const PDF &pdf );
int n_obj ( const PDF &pdf );
@@ 75,7 +75,7 @@ class PDF::PDF {
explicit PDF( const std::string &filename );
~PDF();
friend int file_length ( const PDF &pdf );
 friend int offset_last_xref_table( const PDF &pdf );
+ friend int offset_last_xref_table( const std::string &pdf_filename );
friend Iref iref_catalog ( const PDF &pdf );
friend Iref iref_info ( const PDF &pdf );
friend int n_obj ( const PDF &pdf );
diff pruN 0.53.201204142/btool/PDF/update_catalog.cc 0.56.20180123.12/btool/PDF/update_catalog.cc
 0.53.201204142/btool/PDF/update_catalog.cc 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/btool/PDF/update_catalog.cc 20190107 01:19:37.000000000 +0000
@@ 169,7 +169,7 @@ string PDF::add_title_to_info(
if ( obj_old.isNull() ) {
Object obj_new;
{
- GooString gs( title.c_str() );
+ GooString &gs = *( new GooString( title.c_str() ) );
obj_new.initString( &gs );
}
info>add( s_Title, &obj_new );
@@ 212,7 +212,8 @@ string PDF::add_title_to_info(
string PDF::update_trailer(
PDF &pdf,
const int n_pdf_obj,
 const int offset_xref
+ const int offset_xref,
+ const std::string &pdf_filename
) {
PDF_rep *const rep = pdf.get_PDF_rep(magic);
@@ 241,7 +242,7 @@ string PDF::update_trailer(
char s_Prev[] = "Prev";
{
Object obj;
 obj.initInt( offset_last_xref_table(pdf) );
+ obj.initInt( offset_last_xref_table(pdf_filename) );
new_trailer>add( s_Prev, &obj );
}
diff pruN 0.53.201204142/btool/PDF/update_catalog.h 0.56.20180123.12/btool/PDF/update_catalog.h
 0.53.201204142/btool/PDF/update_catalog.h 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/btool/PDF/update_catalog.h 20170509 11:46:22.000000000 +0000
@@ 21,7 +21,8 @@ namespace PDF {
std::string update_trailer(
PDF &pdf,
int n_pdf_obj,
 int offset_xref
+ int offset_xref,
+ const std::string &pdf_filename
);
}
diff pruN 0.53.201204142/btool/PDF/updator.cc 0.56.20180123.12/btool/PDF/updator.cc
 0.53.201204142/btool/PDF/updator.cc 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/btool/PDF/updator.cc 20170509 11:46:22.000000000 +0000
@@ 15,6 +15,7 @@ string PDF::updator(
PDF &pdf,
const Page_no::PS_page_numbering &nog,
const TOC::Table &toc,
+ const std::string &pdf_filename,
const string &title
) {
@@ 79,7 +80,8 @@ string PDF::updator(
string trailer = update_trailer(
pdf,
n_obj(pdf) + outline.size(),
 file_offset
+ file_offset,
+ pdf_filename
);
string res;
@@ 110,6 +112,7 @@ string PDF::updator(
pdf,
Page_no::PS_page_numbering( filename_ps ),
TOC::Table( filename_toc ),
+ filename_pdf,
title
);
}
diff pruN 0.53.201204142/btool/PDF/updator.h 0.56.20180123.12/btool/PDF/updator.h
 0.53.201204142/btool/PDF/updator.h 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/btool/PDF/updator.h 20170509 11:46:22.000000000 +0000
@@ 19,6 +19,7 @@ namespace PDF {
PDF &pdf,
const Page_no::PS_page_numbering &nog,
const TOC::Table &toc,
+ const std::string &pdf_filename,
const std::string &title = std::string()
);
std::string updator(
diff pruN 0.53.201204142/btool/TOC/Makefile 0.56.20180123.12/btool/TOC/Makefile
 0.53.201204142/btool/TOC/Makefile 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/btool/TOC/Makefile 20170509 11:46:22.000000000 +0000
@@ 2,6 +2,9 @@
# This makefile is meant to be used only when accessed
# through a symbolic link from an immediate subdirectory.
+CXXFLAGS += -std=c++11
+CXXFLAGS += $(shell pkg-config --cflags poppler)
+
warn := -Wall -Wextra
include ../Makefile-optim
@@ -15,7 +18,7 @@ ifneq ($(strip $(filter-out $(clean), $(
include $(alld)
endif
%.d: %.cc; g++ -MM $< | sed -e 's/:/ $*.d:/' >$@
-%.o:; g++ $(warn) $(optim) -c $< -o $*.o
+%.o:; g++ $(warn) $(optim) $(CXXFLAGS) -c $< -o $*.o
cleanless:
	rm -fv *.d *.o *.gch a.out
diff pruN 0.53.201204142/btool/Util/Makefile 0.56.20180123.12/btool/Util/Makefile
 0.53.201204142/btool/Util/Makefile 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/btool/Util/Makefile 20170509 11:46:22.000000000 +0000
@@ 2,6 +2,9 @@
# This makefile is meant to be used only when accessed
# through a symbolic link from an immediate subdirectory.
+CXXFLAGS += -std=c++11
+CXXFLAGS += $(shell pkg-config --cflags poppler)
+
warn := -Wall -Wextra
include ../Makefile-optim
@@ -15,7 +18,7 @@ ifneq ($(strip $(filter-out $(clean), $(
include $(alld)
endif
%.d: %.cc; g++ -MM $< | sed -e 's/:/ $*.d:/' >$@
-%.o:; g++ $(warn) $(optim) -c $< -o $*.o
+%.o:; g++ $(warn) $(optim) $(CXXFLAGS) -c $< -o $*.o
cleanless:
	rm -fv *.d *.o *.gch a.out
diff pruN 0.53.201204142/btool/Util/TeX_atom.cc 0.56.20180123.12/btool/Util/TeX_atom.cc
 0.53.201204142/btool/Util/TeX_atom.cc 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/btool/Util/TeX_atom.cc 20170509 11:46:22.000000000 +0000
@@ 76,11 +76,11 @@ void Util::tokenize_TeX(
}
void Util::TeX_atom_nonterminal::init(
 vector ::const_iterator p,
 const vector::const_iterator end
+ vector ::const_iterator p,
+ const vector::const_iterator end
) {
int level = 0;
 vector::const_iterator q = end;
+ vector::const_iterator q = end;
for ( ; p != end; ++p ) {
if ( *p == "{" ) {
if ( !level ) q = p+1;
@@ 114,20 +114,20 @@ Util::TeX_atom_nonterminal::~TeX_atom_no
Util::TeX_atom_nonterminal::TeX_atom_nonterminal(
const string &line
) {
 vector tokens;
+ vector tokens;
tokenize_TeX( line, &tokens, TRANSLATE_NOBREAKSPACE );
init( tokens.begin(), tokens.end() );
}
Util::TeX_atom_nonterminal::TeX_atom_nonterminal(
 const vector &tokens
+ const vector &tokens
) {
init( tokens.begin(), tokens.end() );
}
Util::TeX_atom_nonterminal::TeX_atom_nonterminal(
 const vector::const_iterator begin,
 const vector::const_iterator end
+ const vector::const_iterator begin,
+ const vector::const_iterator end
) {
init( begin, end );
}
diff pruN 0.53.201204142/debian/changelog 0.56.20180123.12/debian/changelog
 0.53.201204142/debian/changelog 20150601 19:27:07.000000000 +0000
+++ 0.56.20180123.12/debian/changelog 20190105 01:00:00.000000000 +0000
@@ 1,3 +1,113 @@
+derivations (0.56.20180123.12) unstable; urgency=low
+
+ * Reconfigured and rebuilt for upload to Debian sid.
+ * Conformed to StandardsVersion 4.3.0.
+ * Conformed btool/ to sid's Poppler, version 0.69 (closes: #884476).
+ Required at least this version of Poppler to build.
+
+ -- Thaddeus H. Black <thb@debian.org>  Sat, 05 Jan 2019 01:00:00 +0000
+
+derivations (0.56.20180123.11) experimental; urgency=none
+
+ * Updated to the new upstream version.
+ * Conformed to StandardsVersion 3.9.8.
+ * In debian/control, required build dependencies to have advanced at
+ least as far as the versions shipped with Debian stretch stable.
+ (Reason: to build the present issue on older Debians has not been
+ tried by the package's maintainer.)
+ * Noticing that libpopplerprivatedev, version 0.69, breaks the
+ build, observing that version 0.48 does not break it, and remaining
+ unsure regarding versions between, forbade versions of
+ libpopplerprivatedev other than 0.48. (Buster's derivations
+ is to relieve the restriction but the present revision, never
+ uploaded to Debian's archive, chiefly targets stretch stable.)
+ * DeQuilted the source. Observed that this does not mean that Quilt
+ can no longer be used  for some future revisions of the package
+ are likely to be reQuilted  but it does mean that spurious Quilt
+ files and directories no longer need to be (and no longer should
+ be) left lying about during times at which there are no patches for
+ Quilt to manage.
+ * Added the script debian/helper/rmquilt to help with future
+ deQuilting if and as needed. Added a corresponding paragraph
+ to debian/helper/RelSteps.
+ * To avoid confusion, emphasized here that the package's Debian
+ source format remains 3.0 (quilt) whether or not Quilt has patches
+ to manage. It's just that Quilt's *files* can now be omitted when
+ there are no patches.
+ * Updated debian/copyright to point the user to the source's
+ current location.
+ * Advanced to build using Debhelper 10.
+
+  Thaddeus H. Black Sat, 05 Jan 2019 00:00:00 +0000
+
+derivations (0.56.201801231) experimental; urgency=none
+
+ * Updated to the new upstream version. Refer to doc/changelog in the
+ source or to /usr/share/doc/derivations/changelog.gz in the binary
+ for details.
+
+  Thaddeus H. Black Tue, 23 Jan 2018 00:00:00 +0000
+
+derivations (0.55.201801171) experimental; urgency=none
+
+ * Updated to the new upstream version. Refer to doc/changelog in the
+ source or to /usr/share/doc/derivations/changelog.gz in the binary
+ for details.
+ * Corrected debian/helper/RelSteps in various minor ways, for example
+ to report the new path to the helper scripts and to refer to the new
+ package format.
+
+  Thaddeus H. Black Wed, 17 Jan 2018 00:00:00 +0000
+
+derivations (0.55.201707031) experimental; urgency=none
+
+ * Updated to the new upstream version. Refer to doc/changelog in the
+ source or to /usr/share/doc/derivations/changelog.gz in the binary
+ for details.
+
+  Thaddeus H. Black Mon, 03 Jul 2017 00:00:00 +0000
+
+derivations (0.55.201706201) experimental; urgency=none
+
+ * Updated to the new upstream version. Refer to doc/changelog in the
+ source or to /usr/share/doc/derivations/changelog.gz in the binary
+ for details.
+
+  Thaddeus H. Black Tue, 20 Jun 2017 00:00:00 +0000
+
+derivations (0.55.201706121) experimental; urgency=none
+
+ * Updated to the new upstream version. Refer to doc/changelog in the
+ source or to /usr/share/doc/derivations/changelog.gz in the binary
+ for details.
+
+  Thaddeus H. Black Mon, 12 Jun 2017 00:00:00 +0000
+
+derivations (0.55.201705301) experimental; urgency=none
+
+ * Updated to the new upstream version. Refer to doc/changelog in the
+ source or to /usr/share/doc/derivations/changelog.gz in the binary
+ for details.
+
+  Thaddeus H. Black Tue, 30 May 2017 00:00:00 +0000
+
+derivations (0.54.201705081) experimental; urgency=none
+
+ * Updated to the new upstream version. Refer to doc/changelog in the
+ source or to /usr/share/doc/derivations/changelog.gz in the binary
+ for details.
+ * Because the .ps format is not much used any more as far as the
+ writer knows, excluded the .ps from the binary package. (Still
+ included the .pdf, of course.)
+ * In consideration of helpful NMUs, reconfigured some parts of the
+ build process for the time being so that they make more sense to
+ the package maintainer than to the NMUer.
+ * Relocated helper/ as debian/helper.
+ * Conformed the Description in debian/control to the new upstream
+ version's selfdescription.
+
+  Thaddeus H. Black Mon, 08 May 2017 00:00:00 +0000
+
derivations (0.53.201204142) unstable; urgency=low
* Thanked Pino Toscano and Michael Gilbert for their helpful NMUs.
@@ 5,7 +115,7 @@ derivations (0.53.201204142) unstable;
last XRef table (closes: #668907). Recognized Salvatore Bonaccorso for
drawing attention to this bug.
- -- Thaddeus H. Black <thb@debian.org>  Mon,  1 Jun 2015 00:00:00 +0000
+ -- Thaddeus H. Black <thb@debian.org>  Mon, 01 Jun 2015 00:00:00 +0000
derivations (0.53.201204141.2) unstable; urgency=medium
@@ 45,7 +155,7 @@ derivations (0.52.201003101.1) unstable
+ Fix FTBFS with g++4.5.
+ Fix FTBFS with ld asneeded.
+ Don't build with Werror.
 * Added'poppler_api_change' patch to solve a libpoppler API
+ * Added 'poppler_api_change' patch to solve a libpoppler API
change (Closes: #652081).
* Use 'dh_prep' instead of the deprecated 'dh_clean k'.
* Simplified build dependencies, as suggested by Frank Küster (Closes:
diff pruN 0.53.201204142/debian/compat 0.56.20180123.12/debian/compat
 0.53.201204142/debian/compat 20120218 10:02:57.000000000 +0000
+++ 0.56.20180123.12/debian/compat 20190105 00:00:00.000000000 +0000
@@ -1 +1 @@
-5
+10
diff pruN 0.53.201204142/debian/control 0.56.20180123.12/debian/control
 0.53.201204142/debian/control 20150601 19:24:29.000000000 +0000
+++ 0.56.20180123.12/debian/control 20190105 01:00:00.000000000 +0000
@@ 2,26 +2,29 @@ Source: derivations
Section: doc
Priority: optional
Maintainer: Thaddeus H. Black
-Build-Depends: debhelper (>= 5)
-Build-Depends-Indep: texlive-latex-base, texlive-pstricks, lmodern (>= 1.00), rubber (>= 1.1), ghostscript (>= 8.62), libpoppler-private-dev (>= 0.8.7), pkg-config
-Standards-Version: 3.9.6
+Build-Depends: debhelper (>= 10)
+Build-Depends-Indep: texlive-latex-base (>= 2016), texlive-pstricks (>= 2016), lmodern (>= 2.004), rubber (>= 1.4), ghostscript (>= 9.20~dfsg), libpoppler-private-dev (>= 0.69), pkg-config (>= 0.29)
+Standards-Version: 4.3.0
Package: derivations
Section: doc
Priority: optional
Architecture: all
Depends: ${misc:Depends}
-Suggests: evince | gv | pdf-viewer | postscript-viewer, ghostscript
+Suggests: evince | pdf-viewer
Description: book: Derivations of Applied Mathematics
 Understandably, program sources rarely derive the mathematical formulas
 they use. Not wishing to take the formulas on faith, a user might
 nevertheless reasonably wish to see such formulas somewhere derived.
+ For various valid reasons, opensource program sources rarely derive
+ the mathematical formulas they use. A user, nevertheless  not
+ wishing to take such formulas on faith  might wish to see such
+ formulas *somewhere* derived.
.
 Derivations of Applied Mathematics is a book which documents and
 derives many of the mathematical formulas and methods implemented in
 free software or used in science and engineering generally. It
 documents and derives the Taylor series (used to calculate
 trigonometrics), the NewtonRaphson method (used to calculate square
 roots), the Pythagorean theorem (used to calculate distances) and many
 others.
+ Derivations of Applied Mathematics is a book that derives, and
+ documents, many of the mathematical formulas and methods opensource
+ programs use, and indeed many of the formulas and methods used in
+ science and engineering generally. For example, it derives and
+ documents the Taylor series (used to calculate trigonometrics), the
+ NewtonRaphson method (used to calculate square roots), the Pythagorean
+ theorem (used to calculate distances) and many others.
+ .
+ The book's format is PDF.
diff pruN 0.53.201204142/debian/copyright 0.56.20180123.12/debian/copyright
 0.53.201204142/debian/copyright 20120415 16:57:25.000000000 +0000
+++ 0.56.20180123.12/debian/copyright 20190105 01:00:00.000000000 +0000
@@ 3,9 +3,9 @@ by Thaddeus H. Black
on Fri, 04 Nov 2005 00:00:00 +0000.
The original source can always be found at:
-   ftp://ftp.debian.org/dists/unstable/main/source/
+   http://ftp.debian.org/debian/pool/main/d/derivations/
-Copyright (C) 1983-2010 Thaddeus H. Black
+Copyright (C) 1983-2018 Thaddeus H. Black
License:
@@ 27,14 +27,3 @@ License:
On Debian systems, the complete text of the GNU General
Public License can be found in `/usr/share/commonlicenses/GPL2'.
The tex/xkeyval.{sty,tex} and associated source files
are Copyright (C) 20042008 Hendri Adriaens and licensed as follows.

 This work may be distributed and/or modified under the
 conditions of the LaTeX Project Public License, either version 1.3
 of this license or (at your option) any later version.
 The latest version of this license is in
 http://www.latexproject.org/lppl.txt
 and version 1.3 or later is part of all distributions of LaTeX
 version 2003/12/01 or later.

diff pruN 0.53.201204142/debian/derivations.docs 0.56.20180123.12/debian/derivations.docs
 0.53.201204142/debian/derivations.docs 20120218 10:02:57.000000000 +0000
+++ 0.56.20180123.12/debian/derivations.docs 20170509 18:34:49.000000000 +0000
@@ 1,3 +1,2 @@
-tex/derivations.ps
tex/derivations.pdf
README
diff pruN 0.53.201204142/debian/helper/buffesrc 0.56.20180123.12/debian/helper/buffesrc
 0.53.201204142/debian/helper/buffesrc 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/buffesrc 20170509 16:39:40.000000000 +0000
@@ 0,0 +1,17 @@
+#! /bin/bash -e
+
+# (See the notes in `./buffexpand'.)
+#
+# This script is successfully called from the main source directory as
+# `debian/helper/buffesrc'. It buffs and expands all the source
+# files, except that it only buffs the two Makefiles `Makefile'
+# and `debian/rules'.
+
+BUFFE='debian/helper/buffexpand -M'
+for F in `find .` ; do
+ if [ -f $F ] ; then
+ $BUFFE $F
+ echo $F
+ fi
+done
+
diff pruN 0.53.201204142/debian/helper/buffexpand 0.56.20180123.12/debian/helper/buffexpand
 0.53.201204142/debian/helper/buffexpand 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/buffexpand 20170520 13:54:44.000000000 +0000
@@ 0,0 +1,121 @@
+#! /usr/bin/perl
+
+# This Perl script buffs (removes trailing blanks from) and/or expands
+# (converts tabs to spaces in) the files named on the command line. It
+# also ensures that each file (if not empty) ends in a proper "\n".
+
+use warnings;
+use strict;
+use integer;
+
+our $tab = 8;
+our $pat_makefile_name = qr/Makefile(?:.*)?rules/;
+our $usage = <;
+ close FILE;
+ my $has_acted = 0;
+ my @buff_expanded = buff_expand $is_makefile, $has_acted, @line;
+ if ( $stdout ) {
+ print @buff_expanded;
+ }
+ elsif ( $has_acted ) {
+ open FILE, '>', $file
+ or warn( "$0: cannot write $file\n" ), next;
+ print FILE @buff_expanded;
+ close FILE;
+ }
+ }
+}
+else {
+ my @line = <>;
+ my $has_acted = 0;
+ print buff_expand 0, $has_acted, @line;
+}
+
diff pruN 0.53.201204142/debian/helper/Def.pm 0.56.20180123.12/debian/helper/Def.pm
 0.53.201204142/debian/helper/Def.pm 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/Def.pm 20180123 00:00:00.000000000 +0000
@@ 0,0 +1,24 @@
+use warnings;
+use strict;
+package Def;
+
+# This file defines common parameters for some of the helper scripts.
+
+our $out = 'derivations';
+our $full_title = 'Derivations of Applied Mathematics';
+our $author = 'Thaddeus H. Black';
+our $email = 'thb@derivations.org';
+our $email_deb = 'thb@debian.org';
+our $width = 72;
+our $basever = '0.56';
+our $main0 = 'main';
+our $bib0 = 'bib';
+our $dist = 'experimental';
+our $urgency = 'none';
+our $mansect = '7';
+our $name_readme = 'README';
+our $traditional_readme = 0;
+our $cl_entry_dflt = '[Add changelog entries here.]';
+
+1;
+
diff pruN 0.53.201204142/debian/helper/deprecated/checkcdf 0.56.20180123.12/debian/helper/deprecated/checkcdf
 0.53.201204142/debian/helper/deprecated/checkcdf 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/deprecated/checkcdf 20170702 23:43:40.000000000 +0000
@@ 0,0 +1,20 @@
+#!/usr/bin/perl
+use warnings;
+use strict;
+
+our $x = shift(@ARGV);
+our $N = 80;
+
+my $twopi = 8.0*atan2(1.0,1.0);
+my $total = 0.5;
+my $term = $x/sqrt($twopi);
+
+for my $k (0 .. $N) {
+ if ($k > 0) {
+ $term *= -($x*$x)/(2.0*$k);
+ }
+ $total += $term/(2.0*$k+1.0);
+}
+
+print "$total\n";
+
diff pruN 0.53.201204142/debian/helper/deprecated/check_gj.m 0.56.20180123.12/debian/helper/deprecated/check_gj.m
 0.53.201204142/debian/helper/deprecated/check_gj.m 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/deprecated/check_gj.m 20180113 13:52:04.000000000 +0000
@@ 0,0 +1,159 @@
+m=4; n=5; % or n=4;
+A=randn(m,n)+sqrt(-1.0)*randn(m,n);
+P=eye(m,m);
+D=eye(m,m);
+L=eye(m,m);
+U=eye(m,m);
+I=A;
+K=eye(n,n);
+S=eye(n,n);
+Pv=eye(m,m);
+Dv=eye(m,m);
+Lv=eye(m,m);
+Uv=eye(m,m);
+Kv=eye(n,n);
+Sv=eye(n,n);
+
+i=1;
+
+% step 5
+T=eye(m,m); T(i,i)=I(i,i);
+Tv=eye(m,m); Tv(i,i)=1.0/I(i,i);
+D=D*T; Dv=Tv*Dv;
+L=Tv*L*T; Lv=Tv*Lv*T;
+I=Tv*I;
+
+% step 6
+T=eye(m,m);
+Tv=eye(m,m);
+for p = i+1:m;
+ T1=eye(m,m); T1(p,i)=I(p,i);
+ T1v=eye(m,m); T1v(p,i)=I(p,i);
+ T=T*T1;
+ Tv=T1v*Tv;
+endfor;
+L=L*T; Lv=Tv*Lv;
+I=Tv*I;
+
+i=2;
+
+% step 3
+p=3; q=m;
+Tpi=eye(m,m); Tpi(p,p)=0.0; Tpi(i,i)=0.0; Tpi(p,i)=1.0; Tpi(i,p)=1.0;
+Tiq=eye(n,n); Tiq(i,i)=0.0; Tiq(q,q)=0.0; Tiq(i,q)=1.0; Tiq(q,i)=1.0;
+P=P*Tpi; Pv=Tpi*Pv;
+L=Tpi*L*Tpi; Lv=Tpi*Lv*Tpi;
+I=Tpi*I*Tiq;
+S=Tiq*S; Sv=Sv*Tiq;
+
+% step 5
+T=eye(m,m); T(i,i)=I(i,i);
+Tv=eye(m,m); Tv(i,i)=1.0/I(i,i);
+D=D*T; Dv=Tv*Dv;
+L=Tv*L*T; Lv=Tv*Lv*T;
+I=Tv*I;
+
+% step 6
+T=eye(m,m);
+Tv=eye(m,m);
+for p = i+1:m;
+ T1=eye(m,m); T1(p,i)=I(p,i);
+ T1v=eye(m,m); T1v(p,i)=I(p,i);
+ T=T*T1;
+ Tv=T1v*Tv;
+endfor;
+L=L*T; Lv=Tv*Lv;
+I=Tv*I;
+
+i=3;
+
+% step 5
+T=eye(m,m); T(i,i)=I(i,i);
+Tv=eye(m,m); Tv(i,i)=1.0/I(i,i);
+D=D*T; Dv=Tv*Dv;
+L=Tv*L*T; Lv=Tv*Lv*T;
+I=Tv*I;
+
+% step 6
+T=eye(m,m);
+Tv=eye(m,m);
+for p = i+1:m;
+ T1=eye(m,m); T1(p,i)=I(p,i);
+ T1v=eye(m,m); T1v(p,i)=I(p,i);
+ T=T*T1;
+ Tv=T1v*Tv;
+endfor;
+L=L*T; Lv=Tv*Lv;
+I=Tv*I;
+
+i=m;
+
+% step 5
+T=eye(m,m); T(i,i)=I(i,i);
+Tv=eye(m,m); Tv(i,i)=1.0/I(i,i);
+D=D*T; Dv=Tv*Dv;
+L=Tv*L*T; Lv=Tv*Lv*T;
+I=Tv*I;
+
+i=m;
+r=i;
+
+% step 10
+T=eye(m,m);
+Tv=eye(m,m);
+for p = 1:i-1;
+ T1=eye(m,m); T1(p,i)=I(p,i);
+ T1v=eye(m,m); T1v(p,i)=I(p,i);
+ T=T1*T;
+ Tv=Tv*T1v;
+endfor;
+U=U*T; Uv=Tv*Uv;
+I=Tv*I;
+
+i=3;
+
+% step 10
+T=eye(m,m);
+Tv=eye(m,m);
+for p = 1:i-1;
+ T1=eye(m,m); T1(p,i)=I(p,i);
+ T1v=eye(m,m); T1v(p,i)=I(p,i);
+ T=T1*T;
+ Tv=Tv*T1v;
+endfor;
+U=U*T; Uv=Tv*Uv;
+I=Tv*I;
+
+i=2;
+
+% step 10
+T=eye(m,m);
+Tv=eye(m,m);
+for p = 1:i-1;
+ T1=eye(m,m); T1(p,i)=I(p,i);
+ T1v=eye(m,m); T1v(p,i)=I(p,i);
+ T=T1*T;
+ Tv=Tv*T1v;
+endfor;
+U=U*T; Uv=Tv*Uv;
+I=Tv*I;
+
+% step 12
+T=eye(n,n);
+Tv=eye(n,n);
+for q=(r+1:n);
+ for p=(1:r);
+ T(p,q)=I(p,q);
+ Tv(p,q)=I(p,q);
+ endfor;
+endfor;
+I=I*Tv;
+K=T*K; Kv=Kv*Tv;
+
+Ir = zeros(m,n);
+for p=(1:m); Ir(p,p)=1.0; endfor;
+Gl = P*D*L*U*eye(m,m);
+Gr = Ir*K*S;
+
+HrInr = zeros(n,n-m); HrInr(n)=1;
+
diff pruN 0.53.201204142/debian/helper/deprecated/checktaylorshift 0.56.20180123.12/debian/helper/deprecated/checktaylorshift
 0.53.201204142/debian/helper/deprecated/checktaylorshift 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/deprecated/checktaylorshift 20170604 04:33:41.000000000 +0000
@@ 0,0 +1,55 @@
+#!/usr/bin/perl
+use warnings;
+use strict;
+
+my $a2m = 0.31;
+my $a1m = 0.42;
+my $a0 = 1.14;
+my $a1 = 0.97;
+my $a2 = 0.55;
+my $a3 = 0.26;
+
+my $z = 2.09;
+my $zo = 2.0;
+my $z1 = 2.3;
+my $d = $z1 - $zo;
+my $jmax= 180;
+
+my $c2m = (1.0/($d*$d))*$a2m;
+my $c1m = (1.0/$d)*$a1m;
+my $c0 = $a0;
+my $c1 = $d*$a1;
+my $c2 = $d*$d*$a2;
+my $c3 = $d*$d*$d*$a3;
+
+sub que ($) {
+ my $i = shift;
+ return $c1m + ($i+1.0)*$c2m;
+}
+sub p ($) {
+ my $i = shift;
+ $i == 0 and return $c0+$c1+$c2+$c3;
+ $i == 1 and return $c12.0*$c23.0*$c3;
+ $i == 2 and return $c2+3.0*$c3;
+ $i == 3 and return $c3;
+ return 0.0;
+}
+
+my $w = ($z$z1)/($zo$z1);
+
+my $res = 0.0;
+my $wj = 1.0;
+for (my $j = 0; $j<=$jmax; ++$j) {
+ my $u1 = p($j);
+ my $u2 = que($j);
+ $res += ($u1+$u2)*$wj;
+ $wj *= $w;
+}
+
+print "$res\n";
+
+my $pre = $a2m/(($z$zo)*($z$zo)) + $a1m/($z$zo) + $a0 + $a1*($z$zo)
+ + $a2*($z$zo)*($z$zo) + $a3*($z$zo)*($z$zo)*($z$zo);
+
+print "$pre\n";
+
diff pruN 0.53.201204142/debian/helper/deprecated/closesource 0.56.20180123.12/debian/helper/deprecated/closesource
 0.53.201204142/debian/helper/deprecated/closesource 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/deprecated/closesource 20170509 16:25:36.000000000 +0000
@@ 0,0 +1,68 @@
+
+# This bash shell pseudoscript closes the development source tree for
+# subsequent building. Note that you can close the source as many times
+# as you like; you needn't (and shouldn't) reopen it each time.
+
+if [[ $MAINTENV != derivations ]] ; then
+ echo 1>&2 "Please run helper/maintenv first."
+ false
+elif [[ ! $PWD -ef $E/$P$V.$D2 ]] ; then
+ echo 1>&2 "Please run from the top source directory $E/$D2."
+ false
+else
+
+ if [[ $D3 == '' ]] ; then D3=$D2 ; fi
+ if [[ $VD == '' ]] ; then VD=1 ; fi
+ if echo -n $VD | grep -q '[^0-9]' ; then
+ echo 1>&2 "The \$VD must be a nonnegative integer."
+ false
+ else
+
+ if [[ $(( $VD > 1 )) == 0 ]] ; then
+
+ if [[ $D3 != $D2 && e $E/$D3 ]] ; then
+ echo 1>&2 "Sorry, but $E/$D3 already exists."
+ false
+ else
+ if [[ $D3 != $D2 ]] ; then
+ cd ..
+ mv $P$V.{$D2,$D3}
+ rm f $DIFF
+ cd $P$V.$D3
+ fi
+ helper/updatedate $D3
+ D2=$D3
+ helper/buffesrc >/dev/null
+ debian/rules origforce
+ mkdiff
+ fi
+
+ else
+
+ if [[ e $E/$P$V.$D1$(( $VD  1 )) && $D2 != $D1 ]] ; then
+ echo 1>&2 "Sorry, but \$D2 != \$D1,"
+ echo 1>&2 "yet $E/$P$V.$D1$(( $VD  1 )) already exists."
+ false
+ else
+ rm -Rf $E/$P$V.$D1$(( $VD - 1 ))
+ if [[ $D2 != $D1 ]] ; then
+ cd ..
+ mv $P$V.$D1{,$(( $VD  1 ))}
+ mv $P$V.{$D2,$D1}
+ cd $P$V.$D1
+ fi
+ helper/updatedebver $VD
+ helper/updatedate $D1
+ helper/updatedate ds $D2
+ D2=$D1
+ D3=$D1
+ helper/buffesrc >/dev/null
+ mkdiff
+ fi
+
+ fi
+
+ fi
+
+fi
+
diff pruN 0.53.201204142/debian/helper/deprecated/cubic.m 0.56.20180123.12/debian/helper/deprecated/cubic.m
 0.53.201204142/debian/helper/deprecated/cubic.m 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/deprecated/cubic.m 20170609 17:01:10.000000000 +0000
@@ 0,0 +1,23 @@
+#!/usr/bin/octave
+function z = cubic(a)
+ aaa = a/3.0;
+ P = aaa(2)  aaa(1)^2
+ Q = (a(3) + 3.0*aaa(2)*aaa(1)  2.0*aaa(1)^3) / 2.0
+ p = 3.0*P
+ q = 2.0*Q
+ P3 = P^3
+ Q2 = Q^2
+ www = 0.0;
+ w1 = 0.0;
+ if (Q < 0.0)
+ www = Q  sqrt(Q^2 + P^3)
+ w1 = (www)^(1.0/3.0);
+ else
+ www = Q + sqrt(Q^2 + P^3)
+ w1 = www^(1.0/3.0);
+ endif;
+ fourhour = (1.0 + i*sqrt(3.0)) / 2.0;
+ w = w1 * [1.0 fourhour fourhour']
+ x = w  P ./ w
+ z = x  aaa(1);
+endfunction;
diff -pruN 0.53.20120414-2/debian/helper/deprecated/figeuclid 0.56.20180123.1-2/debian/helper/deprecated/figeuclid
--- 0.53.20120414-2/debian/helper/deprecated/figeuclid	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/deprecated/figeuclid	2017-05-24 22:52:50.000000000 +0000
@@ -0,0 +1,35 @@
+#!/usr/bin/perl
+use warnings;
+use strict;
+sub asin {
+ my $sin = shift;
+ my $cos = sqrt(1.0 - $sin*$sin);
+ my $tan = $sin/$cos;
+ return atan2($tan, 1.0);
+}
+my $base=1.25;
+my $height=0.75;
+my $a=1.8;
+my $b=2.7;
+#my $d=(@ARGV?shift(@ARGV):0.0);
+my $d=((2.0-0.5)/2.0)/72.27*2.54;
+our $pi = 4.0*atan2(1.0,1.0);
+my $theta=15.0*$pi/180.0;
+my $thleft=$pi/2.0+$theta;
+my $left=$height/cos($theta);
+my $right=sqrt($left*$left+$base*$base-2*$left*$base*cos($thleft));
+my $thright=asin($left*sin($thleft)/$right);
+my $thtop=asin($base*sin($thleft)/$right);
+my $thl2=$thleft/2.0-$pi;
+my $thr2=$thright/2.0;
+my $tht2=( (-$thleft) + ($pi-$thright) )/2.0;
+my $c=$a+$b;
+sub printline {
+ my($s,$l1,$l2,$d1) = @_;
+ print "\\nc\\tt${l1}x{" . (($s*$left)*(cos($thleft))) . "} \\nc\\tt${l1}y{". (($s*$left)*sin($thleft)) . "} \\nc\\tt${l2}x{". ($s*$base) . "}\n";
+}
+print "\\nc\\ttax{" . (($c*$left)*(cos($thleft))$d*cos($tht2)/sin($thtop/2.0)) . "} \\nc\\ttay{". (($c*$left)*sin($thleft)+$d*sin($tht2)/sin($thtop/2.0)) . "}\n";
+print "\\nc\\ttbx{". (($c*$base)+$d*cos($thr2)/sin($thright/2.0)) . "} \\nc\\ttby{". ($d*sin($thr2)/sin($thright/2.0)) . "}\n";
+print "\\nc\\ttvx{". ($d*cos($thl2)/sin($thleft/2.0)) . "} \\nc\\ttvy{". ($d*sin($thl2)/sin($thleft/2.0)) . "}\n";
+printline $b, 'c', 'd', 0.0;
+printline $a, 'e', 'f', 0.0;
diff -pruN 0.53.20120414-2/debian/helper/deprecated/maintenv 0.56.20180123.1-2/debian/helper/deprecated/maintenv
--- 0.53.20120414-2/debian/helper/deprecated/maintenv	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/deprecated/maintenv	2017-05-09 16:25:36.000000000 +0000
@@ -0,0 +1,65 @@
+
+# This bash shell pseudoscript sets some useful environment
+# variables for derivations package maintenance. You run it
+# by ". helper/maintenv".
+#
+# Before running this pseudoscript you must set the following:
+#
+# D1 the old version date (for example, 19700101)
+# D2 the prospective new version date
+#
+# (It would be neater to make this and the associated pseudoscripts real
+# scripts, but it would also be less useful from the author's point of
+# view. Remember that this is just a development helper. The reason to
+# use pseudoscripts is to keep the various shell variables and functions
+# in the shell's global namespace where the maintainer can manipulate
+# them directly. If you don't want your shell's namespace cluttered,
+# you can spawn a subshell before invoking the pseudoscript.)
+
+if [[ $D1 == '' || $D2 == '' ]] ; then
+ echo 1>&2 "Please set D1 and D2 before running this pseudoscript."
+ false
+elif echo -n $VD | grep -q '[^0-9]' ; then
+ echo 1>&2 "The \$VD must be a nonnegative integer."
+ false
+elif [[ $D1 == $D2 && ( $VD == '' || $(( $VD > 1 )) == 0 ) ]] ; then
+ echo 1>&2 "Please set either \$VD > 1 or \$D2 != \$D1."
+ false
+else
+
+ DVLARCH=i386 # the maintainer's own machine architecture
+ E=~/der # the maintainer's development superdirectory
+ P=derivations # the package name
+ V=0.5 # the base version number
+
+ # Derive additional variables.
+ if [[ $VD == '' ]] ; then VD=1 ; fi
+
+ # Define a shell function to build a diff.
+ function mkdiff {
+ if [[ -x helper/buffesrc ]] ; then
+ if [[ $(( $VD > 1 )) == 0 ]] ; then
+ DIFF=$E/$P$V.$D2.diff
+ (
+ cd ..
+ diff -ruN $P$V.{$D1,$D2} >$DIFF
+ )
+ else
+ DIFF=$E/$P$V.$D1-$VD.diff
+ (
+ cd ..
+ diff -ruN $P$V.$D1{-$(( $VD - 1 )),} >$DIFF
+ )
+ fi
+ else
+ echo 1>&2 "You must run this shell function from the top source"
+ echo 1>&2 "directory, and helper/buffesrc must be executable."
+ false
+ fi
+ }
+
+ # Set a flag to mark that the pseudoscript has been run.
+ MAINTENV=$P
+
+fi
+
diff -pruN 0.53.20120414-2/debian/helper/deprecated/makereadme 0.56.20180123.1-2/debian/helper/deprecated/makereadme
--- 0.53.20120414-2/debian/helper/deprecated/makereadme	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/deprecated/makereadme	2017-05-09 16:44:49.000000000 +0000
@@ -0,0 +1,273 @@
+#! /usr/bin/perl
+use warnings;
+use strict;
+use integer;
+use FindBin;
+use lib $FindBin::RealBin;
+use Def;
+
+# (This script may be obsolete.)
+#
+# This script generates an appropriate README from the following listed
+# sections of the manpage. It clobbers the existing README.
+#
+# (This highly specialized helper script is perhaps the result of
+# overenthusiasm. It automates an otherwise slightly annoying
+# packagemaintenance task, but in retrospect it is not clear that the
+# effort spent in writing the script justifies the gain. Nevertheless,
+# I like the script. Here it is. THB)
+#
+# (By the way, I thought about extending the script to autogenerate the
+# long description in debian/control. However, overenthusiasm has
+# bounds. The long description is twenty times as important as the
+# README. It merits manual crafting. But maybe we should generate the
+# manpage "SUMMARY" section from the long description? No, not
+# today. THB)
+#
+# The Makefile and debian/rules probably should not invoke this script.
+# Probably only the developer should invoke it, manually, if and when he
+# wants to.
+#
+# As a developer, you do not need to use this script. You can write
+# your own README if you want to. The only reason the script exists is
+# that the author couldn't really think of anything at the moment to
+# write in the README which wasn't already in the manpage, but if you
+# can think of something else to write there, go right ahead. However,
+# if you do use this script and if you modify it, note the "Make special
+# corrections" block below.
+#
+# One possible use of this script is to autogenerate a candidate README
+# which you then manually edit.
+
+# Relevant manpage sections.
+our @sh = (
+ 'DESCRIPTION',
+ 'READING THE BOOK',
+ 'AUTHOR',
+);
+
+our $marker = "\001";
+our $headlead_trad = 'The Debian Package';
+our $mark_lic = qr/^Copyright\s+\(C\)/;
+our $time_dflt = '00:00:00 +0000';
+our $cmd_date = 'date -uRd';
+our $cmd_fmt = "fmt -w${Def::width} -u";
+our $cmd_tempfile = 'tempfile';
+
+my $manpage = "${FindBin::RealBin}/../doc/${Def::out}.${Def::mansect}";
+my $deb_cprt = "${FindBin::RealBin}/../debian/copyright";
+my $readme = "${FindBin::RealBin}/../${Def::name_readme}";
+my $bar = '-' x ${Def::width} ."\n";
+my $mp_date;
+my $mp_author;
+my $mp_title;
+
+# Subroutine: splice lines ending in backslashnewline.
+sub splice_lines (;\@) {
+ local $_ = @_ ? shift : \$_;
+ for my $i ( reverse 0 .. $#$_ ) {
+ chomp $_>[$i];
+ next unless $_>[$i] =~ /\\$/;
+ chop $_>[$i];
+ splice @$_, $i, 2, $_>[$i] . $_>[$i+1] . "\n" if $i < $#$_;
+ }
+ $_ .= "\n" for @$_;
+ return $_;
+}
+
+our @escape_save = ();
+# Subroutines: recognize, convert, save and restore escaped characters.
+sub escape (;\$) {
+ local $_ = @_ ? shift : \$_;
+ @escape_save = ();
+ $$_ =~ /$marker/ and die "$0: marker character is reserved\n";
+ my $ends_newline = $$_ =~ /\n\z/;
+ chomp $$_;
+ {
+ my $i;
+ while ( ( $i = index $$_, '\\' ) >= 0 ) {
+ substr( $$_, $i, 5 ) =~ /^\\\*\(/
+ and push( @escape_save, substr( $$_, $i, 5, $marker ) ), next;
+ substr( $$_, $i, 4 ) =~ /^\\\(/
+ and push( @escape_save, substr( $$_, $i, 4, $marker ) ), next;
+ push( @escape_save, substr( $$_, $i, 2, $marker ) );
+ }
+ }
+ $$_ .= "\n" if $ends_newline;
+ return $$_;
+}
+sub convescape () {
+ for ( @escape_save ) {
+ $_ =~ /^\\&$/ and $_ = '' , next;
+ $_ =~ /^\\$/ and $_ = '' , next;
+ $_ =~ /^\\\(em$/i and $_ = '', next;
+ $_ =~ /^\\\*\(lq$/i and $_ = '"' , next;
+ $_ =~ /^\\\*\(rq$/i and $_ = '"' , next;
+ }
+}
+sub unescape (;\$) {
+ local $_ = @_ ? shift : \$_;
+ while ( @escape_save ) {
+ my $c = shift @escape_save;
+ $$_ =~ s/$marker/$c/;
+ }
+ @escape_save = ();
+ return $$_;
+}
+sub convall (;\$) {
+ local $_ = @_ ? shift : \$_;
+ defined $$_ or return $$_;
+ escape $$_;
+ convescape ;
+ unescape $$_;
+ return $$_;
+}
+
+# Subroutine: dequote a quoted string.
+sub dequote (;\$) {
+ local $_ = @_ ? shift : \$_;
+ chomp $$_;
+ escape $$_;
+ $$_ =~ s/^\s*"([^"]*?)"\s*$/$1/;
+ unescape $$_;
+ return $$_;
+}
+
+# Subroutine: collapse an alternating emphasizor.
+sub collapse (;\$) {
+ local $_ = @_ ? shift : \$_;
+ chomp $$_;
+ escape $$_;
+ my @w = $$_ =~ /"[^"]*?"[^"\s]+/g;
+ dequote for @w;
+ $$_ = join( '', @w );
+ unescape $$_;
+ return $$_;
+}
+
+# Subroutine: format text to a maximum width.
+sub format_text (@) {
+ my $file = `$cmd_tempfile`; chomp $file;
+ open FILE, '>', $file;
+ print FILE @_;
+ close FILE;
+ my @ret = `$cmd_fmt $file`;
+ unlink $file;
+ return @ret;
+}
+
+# Read the manpage in.
+my @man;
+open MAN, '<', $manpage;
+ @man = <MAN>;
+close MAN;
+splice_lines @man;
+
+# Parse the manpage.
+my %sect;
+{
+ my $sh;
+ my $text = [];
+ for ( @man ) {
+ next unless /\S/;
+ my( $cmd, $arg ) = /^\.(\S+)(?:\s+(\S(?:.*?\S)??))??\s*$/;
+ if ( defined $cmd ) {
+ if ( $cmd =~ /^(?:BI)$/i ) {
+ dequote $arg;
+ $_ = "$arg\n";
+ $cmd = undef;
+ }
+ elsif ( $cmd =~ /^(?:BRRBIRRIBIIB)$/i ) {
+ collapse $arg;
+ $_ = "$arg\n";
+ $cmd = undef;
+ }
+ elsif ( $cmd =~ /^TH$/i ) {
+ ( $mp_date, $mp_author, $mp_title ) = $arg =~
+ /^.*"([^()"]*?)"\s*"([^()"]*?)"\s*"([^()"]*?)"\s*$/
+ or die "$0: cannot parse .TH line";
+ }
+ elsif ( $cmd =~ /^SH$/i ) {
+ $sect{$sh} = $text if defined $sh;
+ $text = [];
+ $sh = $arg;
+ dequote $sh;
+ }
+ elsif ( $cmd =~ /^PP$/i ) {
+ $_ = undef;
+ $cmd = undef;
+ }
+ # (Ignore lines beginning with other commands.)
+ }
+ push @$text, $_ unless defined $cmd;
+ }
+ $sect{$sh} = $text if defined $sh;
+ $text = undef;
+ $sh = undef;
+}
+
+# If debian/copyright exists, pull licensing text from it.
+my @lic;
+if ( e $deb_cprt ) {
+ my @lic0;
+ open CPRT, '<', $deb_cprt;
+ {
+ my $in = '';
+ while ( <CPRT> ) {
+ $in = '1' if /$mark_lic/;
+ $in or next;
+ push @lic0, $_;
+ }
+ }
+ close CPRT;
+ @lic = format_text @lic0;
+ unshift @lic, $bar, "\n";
+ push @lic, "\n";
+}
+
+# Calculate the manpage date, then prepare the readme's header and
+# footer.
+my $date = `$cmd_date '$mp_date $time_dflt'`; chomp $date;
+my @head = (
+ $Def::traditional_readme
+ ? $headlead_trad . " ${Def::out}\n"
+ : ${Def::full_title} . "\n"
+);
+my @foot = ( "${Def::author} <${Def::email}>\n", "$date\n" );
+if ( $Def::traditional_readme ) {
+ push @head, '-' x (length($head[0])-1) . "\n";
+}
+else {
+ unshift @head, $bar;
+ unshift @head, "\n";
+ push @head, $bar;
+ unshift @foot, $bar;
+ push @foot, "\n";
+}
+push @head, "\n";
+
+# Make special corrections.
+if ( defined $sect{'AUTHOR'} ) {
+ for ( @{ $sect{'AUTHOR'} } ) {
+ next if s/^(The book) (and this manpage are\b)/$1 is/;
+ next if s/^(${Def::out})$/'$1'/;
+ next if s/^(in which) (they are) (distributed.)/$1 the book is $3/;
+ }
+}
+
+# Build the readme.
+my @body0;
+for my $sh ( @sh ) {
+ defined $sect{$sh} or next;
+ convall for @{ $sect{$sh} };
+ push @body0, map { defined() ? $_ : "\n" } @{ $sect{$sh} };
+ push @body0, "\n";
+}
+my @body = format_text @body0;
+my @readme = ( @head, @body, @lic, @foot );
+
+# Write the readme out.
+open README, '>', $readme;
+ print README @readme;
+close README;
+
diff -pruN 0.53.20120414-2/debian/helper/deprecated/opennewsource 0.56.20180123.1-2/debian/helper/deprecated/opennewsource
--- 0.53.20120414-2/debian/helper/deprecated/opennewsource	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/deprecated/opennewsource	2017-05-09 16:25:36.000000000 +0000
@@ -0,0 +1,22 @@
+
+# This bash shell pseudoscript opens a new development source tree.
+
+if [[ $MAINTENV != derivations ]] ; then
+ echo 1>&2 "Please run helper/maintenv first."
+ false
+elif [[ ! $PWD -ef $E/$P$V.$D1 ]] ; then
+ echo 1>&2 "Please run from the top source directory $E/$D1."
+ false
+elif [[ -e $P$V.$D2 ]] ; then
+ echo 1>&2 "Sorry, but $P$V.$D2 already exists."
+ false
+else
+ # Open a full new upstream tree.
+ cd ..
+ cp -a $P$V.{$D1,$D2}
+ cd $P$V.$D2
+ bash  helper/letexec
+ helper/extendchangelog
+ helper/updatedate -c $D2
+fi
+
diff -pruN 0.53.20120414-2/debian/helper/deprecated/README 0.56.20180123.1-2/debian/helper/deprecated/README
--- 0.53.20120414-2/debian/helper/deprecated/README	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/deprecated/README	2017-06-09 10:43:46.000000000 +0000
@@ -0,0 +1,11 @@
+
+The author thought that these files were a good idea
+when he made them, but they have not proven as useful as
+he had thought they would. The maintenv script might
+still be useful. The other files probably are less
+useful, but for now they are kept here.
+
+Some of the files are programs or scripts the book's
+author used during development to check that some
+formula he was typesetting was correct.
+
diff -pruN 0.53.20120414-2/debian/helper/deprecated/texcalcs/correlation.tex 0.56.20180123.1-2/debian/helper/deprecated/texcalcs/correlation.tex
--- 0.53.20120414-2/debian/helper/deprecated/texcalcs/correlation.tex	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/deprecated/texcalcs/correlation.tex	2018-01-13 13:52:04.000000000 +0000
@@ -0,0 +1,162 @@
+\documentclass{article}
+\usepackage{amsmath}
+\begin{document}
+
+\noindent
+Substitutions:
+\[
+ \begin{split}
+ \phi &\leftarrow \psi + \frac \theta 2
+ \\
+ \frac \theta 2 &= \phi - \psi
+ \\
+ \theta &= 2\phi - 2\psi
+ \\
+ d\theta &= 2d\phi
+ \\
+ \psi - \frac \theta 2 &= \psi - (\phi - \psi) = 2\psi - \phi
+ \\
+ \chi &\leftarrow \phi-2\psi
+ \\
+ d\chi &= -2\,d\psi
+ \end{split}
+\]
+Calculations:
+\[
+ \begin{split}
+ \lefteqn{\mathcal F \int h\left(\psi  \frac v 2\right)
+ f\left(\psi + \frac v 2\right)d\psi} &
+ \\&=
+ \frac 1{\sqrt{2\pi}}
+ \int e^{iv\theta}
+ \int h\left(\psi  \frac \theta 2\right)
+ f\left(\psi + \frac \theta 2\right)d\psi\,d\theta
+ \\&=
+ \frac 1{\sqrt{2\pi}}
+ \int\int e^{iv\theta}
+ h\left(\psi  \frac \theta 2\right)
+ f\left(\psi + \frac \theta 2\right) d\theta\,d\psi
+ \\&=
+ \frac 2{\sqrt{2\pi}}
+ \int e^{iv(2\phi-2\psi)}
+ h\left(2\psi  \phi\right)
+ f\left(\phi\right) d\phi\,d\psi
+ \\&=
+ \frac 2{\sqrt{2\pi}}
+ \int e^{iv\phi}
+ f\left(\phi\right)
+ \int e^{iv(\phi-2\psi)}
+ h\left(2\psi  \phi\right)
+ d\psi\,d\phi
+ \\&=
+ \frac 1{\sqrt{2\pi}}
+ \int e^{iv\phi}
+ f\left(\phi\right)
+ \int e^{iv\chi}
+ h\left( \chi \right)
+ d\chi\,d\phi
+ \\&=
+ \frac 1{\sqrt{2\pi}}
+ \int e^{iv\chi}
+ h\left( \chi \right)
+ d\chi
+ \int e^{iv\phi}
+ f\left(\phi\right)
+ d\phi
+ \end{split}
+\]
+
+\newpage
+\noindent
+Skew:
+\[
+ \begin{array}{ccccccccc}
+ \rightarrow &
+ \Phi(w) &
+ \rightarrow &
+ \phi(w) &
+ \rightarrow &
+ \Phi(w) &
+ \rightarrow &
+ \phi(w) &
+ \rightarrow \\
+ \rightarrow &
+ f(w) &
+ \rightarrow &
+ F(w) &
+ \rightarrow &
+ f(w) &
+ \rightarrow &
+ F(w) &
+ \rightarrow \\
+ \rightarrow &
+ \Sigma(w) &
+ \rightarrow &
+ \sigma(w) &
+ \rightarrow &
+ \Sigma(w) &
+ \rightarrow &
+ \sigma(w) &
+ \rightarrow \\
+ \rightarrow &
+ h(w) &
+ \rightarrow &
+ H(w) &
+ \rightarrow &
+ h(w) &
+ \rightarrow &
+ H(w) &
+ \rightarrow
+ \end{array}
+\]
+Skew:
+\[
+ \begin{array}{ccccccccc}
+ \rightarrow &
+ \Gamma(v) &
+ \rightarrow &
+ \gamma(v) &
+ \rightarrow &
+ \Gamma(v) &
+ \rightarrow &
+ \gamma(v) &
+ \rightarrow \\
+ \rightarrow &
+ g(v) &
+ \rightarrow &
+ G(v) &
+ \rightarrow &
+ g(v) &
+ \rightarrow &
+ G(v) &
+ \rightarrow
+ \end{array}
+\]
+Calculations:
+\[
+ \begin{split}
+ \gamma(v) &\equiv \int \sigma\left(\psi  \frac v 2\right)
+ \phi\left(\psi + \frac v 2\right)d\psi \\
+ \Gamma(v) &= \sqrt{2\pi}\Sigma(v)\Phi(v) \\
+ \gamma(v) &= \int \sigma\left(\psi + \frac v 2\right)
+ \phi\left(\psi  \frac v 2\right)d\psi \\
+ \Gamma(v) &= \sqrt{2\pi}\Sigma(v)\Phi(v) \\
+ G(v) &= \int H\left(\psi  \frac v 2\right)
+ F\left(\psi + \frac v 2\right)d\psi \\
+ g(v) &= \sqrt{2\pi}h(v)f(v) \\
+ G(v) &= \int H\left(\psi + \frac v 2\right)
+ F\left(\psi  \frac v 2\right)d\psi \\
+ g(v) &= \sqrt{2\pi}h(v)f(v)
+ \end{split}
+\]
+Conclusion:
+\[
+ \begin{split}
+ g(v) &\rightarrow G(v) \\
+ h(v)f(v) &\rightarrow \frac 1 {\sqrt{2\pi}}
+ \int H\left(\psi  \frac v 2\right)
+ F\left(\psi + \frac v 2\right)d\psi
+ \end{split}
+\]
+
+\end{document}
diff -pruN 0.53.20120414-2/debian/helper/deprecated/texcalcs/Makefile 0.56.20180123.1-2/debian/helper/deprecated/texcalcs/Makefile
--- 0.53.20120414-2/debian/helper/deprecated/texcalcs/Makefile	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/deprecated/texcalcs/Makefile	2017-06-24 23:23:03.000000000 +0000
@@ -0,0 +1,3 @@
+.PHONY: clean
+clean:; rm -v *.aux *.dvi *.log
+correlation.dvi: correlation.tex; latex $^
diff -pruN 0.53.20120414-2/debian/helper/dict 0.56.20180123.1-2/debian/helper/dict
--- 0.53.20120414-2/debian/helper/dict	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/dict	2018-01-23 00:00:00.000000000 +0000
@@ -0,0 +1,1416 @@
+Aa
+ab
+aB
+ABCDEF
+Abelschen
+Abhijit
+Abramowitz
+absurdum
+ac
+aC
+accrete
+accretes
+adecimal
+Adel
+adjointed
+adjointing
+adjoints
+ae
+AEC
+af
+agn
+ahlen
+AHNT
+ai
+al
+alebox
+algebraist
+algebraists
+alggeo
+algorithmizes
+aliceinfo
+alphabetization
+alsj
+AMD
+amenably
+ampl
+amplitudinity
+amplitudinous
+amsmath
+AMSmath
+AMSmath's
+amssymb
+analyses
+Andr
+Andreas
+andrews
+André
+Ang
+antider
+antiderivative
+antiderivative's
+antiderivatives
+AP
+api
+applicationist
+applicationist's
+applicationists
+approximator
+apxe
+apxex
+aQ
+arccosh
+arccosine
+archexponent
+archyperbolic
+arcsinh
+arctanh
+Arfken
+arg
+Argand
+arithMETic
+arithmetizing
+arounds
+artillerist's
+astronautical
+ated
+Athanasios
+Atlee
+Atten
+Augustin
+Automake
+automatable
+autotransform
+autotransforms
+av
+az
+bA
+backportability's
+backporting
+bAI
+Bal
+Balachandran
+Balakumar
+Balanis
+Ballantine
+Banos
+Baños
+baroquity
+Barsoom
+basever
+Bb
+Bbb
+BC
+Beattie
+Begriffsschrift
+Beitr
+Bernhard
+bf
+bg
+bh
+BibTeX
+Bienia
+binary's
+binth
+binthe
+Birkhäuser
+BJam
+Bjarne
+bl
+Blacksburg
+Bladel
+Blaise
+bly
+Boca
+Bonaccorso
+boundedness
+BQ
+breview
+brouwer
+Brouwer's
+BSL
+bT
+bTI
+btool
+BU
+buffe
+buildpackage
+Bulmer
+Burlington
+businesspeople
+businessperson
+BW
+BWH
+byu
+ca
+CAC
+cade
+calculatable
+cally
+Cantorian
+Cardan
+Cardan's
+Cardano
+Cardano's
+Cardanus
+Carus
+cathen
+CatholicEncyc
+cauchy
+Cauchy's
+cauchyf
+cauchyn
+cc
+ccc
+ccccc
+ccccccc
+ccccccccc
+ccccccccccccc
+cd
+CDF
+Ce
+cern
+cexp
+ch
+changelog
+changelogs
+Cholesky
+Chrysippus
+Chs
+ci
+cidfmap
+cis
+cit
+citable
+citational
+Cition
+Civita
+Civita's
+cleandeb
+cleanless
+cleardoublepage
+Cloyne
+cls
+co
+coäuthor
+cois
+Colchester
+columnwise
+com
+Combinatorical
+comm
+commutivity
+complementarily
+composable
+conclu
+config
+congener
+congeners
+conj
+const
+contrib
+conventionalize
+convolutional
+coords
+coördinate
+coördinated
+coördinates
+COPYLEFT
+cor
+correlational
+cos
+cosh
+cosint
+countability
+counterchallenged
+Courant
+Courant's
+couth
+cp
+Cramer's
+CRC
+cred
+crrrrrrrrrc
+CT
+ctan
+curl's
+CX
+CXLVII
+cyc
+cyl
+Dasgupta
+Dasgupta's
+dcurl
+DCXCIII
+Dd
+ddiv
+ddsf
+ddvf
+de
+Debconf
+debhelper
+debian
+Debian's
+Debianization
+Debianized
+Debianizing
+debver
+DEDavis
+deëmphasized
+deëmphasizes
+deëmphasizing
+def
+deferral
+defp
+defpd
+defunbal
+defz
+defzunbal
+del
+deletable
+delp
+deltatr
+Demystified
+denormalization
+denormals
+Dequ
+der
+derivational
+des
+deSturler
+det
+deutschen
+dev
+deviational
+dexp
+df
+dfsg
+dg
+dh
+DHHS
+di
+diag
+diagn
+diagonal's
+diagonalizability
+diagonalization
+diagonalize
+diagonalized
+dict
+diff
+DIFFGZ
+dimen
+dimensionalities
+dimensionlessness
+Dirac's
+dird
+dirdrvtv
+Dirichlet's
+discretization
+discretize
+discretized
+discretizing
+discursion
+dists
+div
+divergenceless
+divg
+divthm
+DL
+dln
+dlnz
+DLUI
+documentclass
+Doetsch
+doko
+domega
+dotm
+dotmag
+dotre
+dp
+dP
+dpkg
+dr
+drvtv
+dS
+ds
+dsc
+dt
+du
+DuckCorp
+dup
+dv
+dvi
+DVLARCH
+dw
+dx
+dy
+dz
+dZ
+editional
+Edouard
+edu
+ee
+Egbertus
+Ehre
+Ei
+eigdet
+eigen
+eigensolution
+eigensolutions
+eigenvalued
+eigenvector's
+electromagnetics
+elementaries
+elementarized
+elementary's
+elfs
+emph
+en
+encyclopedically
+endeavour
+enkindle
+ent
+ents
+enumt
+Ep
+Eq
+eqn
+eqnarray
+eqns
+equidistance
+equiv
+er
+ere's
+erman
+esotericity
+esp
+etch's
+ete
+euler
+Euler's
+Eval
+EWW
+exch
+exp
+expint
+exps
+extremum
+Fabrice
+factorable
+factorials
+failproof
+fakeroot
+Feferman
+Feser
+Feser's
+ff
+FFFF
+fh
+fi
+FilesTex
+fillcolor
+fillstyle
+Fjeld
+fl
+foo
+footnotation
+Foresman
+formable
+forwardmost
+foundational
+fouri
+fourier
+fr
+Fraenkel
+Franciscus
+freg
+Frege
+Frege's
+Fregean
+Friedberg
+Friedrich
+frullani
+Frullani's
+FSF's
+FTBFS
+ftp
+fulltexts
+Gedanke
+Geistes
+genitival
+geo
+geometrie
+Geophys
+Georg
+Georgi
+getCString
+getKey
+gforge
+Gg
+gh
+ghostscript
+gibbs
+Gilbey
+gimbal
+gimbals
+Girolamo
+Giuliano
+GJ
+GJinv
+GJpart
+gjrank
+GJt
+GJtC
+gk
+Glenview
+Gorbag
+Gottfried
+Gottlob
+Goursat
+gov
+gpl
+gra
+Graeco
+grat
+greek
+grep
+groff
+grundlagen
+gt
+gtrsim
+Guillaume
+gv
+gz
+gzip
+Hafner
+Hamming's
+Hankel
+harvard
+hatchangle
+hatchwidth
+Heaviside's
+Hefferon
+Henk
+Henrik
+Hermann
+Hersh
+Hersh's
+hert
+Hessenberg
+Hestenes
+HFDavis
+Hh
+Hilbertian
+Hilburn
+hist
+Hm
+Hopman
+Houghton
+howpublished
+Hölder
+hq
+Hsu
+htm
+html
+http
+hubristic
+hy
+hyperbolics
+hypergeometric
+hyperlink
+hypersie
+i'j
+ia
+iA
+IAe
+iau
+iav
+iB
+ib
+ic
+ics
+Idealismus
+iep
+ifelse
+iff
+ij
+ijk
+ijn
+ik
+iK
+ikj
+im
+IMC
+imn
+implementational
+Imprimerie
+imum
+inbook
+incide
+Indep
+indicial
+infinitifold
+ing
+inria
+Insel
+int
+integ
+integrability
+integrand's
+integrands
+integrodifferential
+Interscience
+intinvz
+inttx
+invdet
+invertibility
+invxform
+ip
+Iref
+irreducibility
+irrotational
+isdraft
+iso
+Issai
+ited
+iu
+iuF
+iuf
+Iulian
+ive
+ively
+iveté
+ivF
+ivf
+iw
+iwh
+iy
+iz
+j'k
+JacksonEA
+jargonal
+jb
+ject
+jh
+ji
+jik
+jj
+jj'k
+JJH
+jjk
+jk
+jki
+jmax
+jn
+Jochens
+Jolley
+joshua
+Jothi
+jp
+jr
+jT
+jU
+ju
+ka
+kA
+karlscalculus
+Kernighan
+Kernighan's
+keyval
+kf
+kh
+Khamsi
+kI
+kij
+Kitaigorodskii
+kj
+kji
+kk
+kkt
+kkz
+Klose
+kn
+Knopp
+Knopp's
+Knoppix
+Kohler's
+kontinuum
+Korte
+Korté
+Korté's
+kp
+Krader
+KRHB
+kritische
+kron
+Krylov
+KSI
+kt
+ku
+kx
+kz
+l'H
+l'Hopital's
+L'Hopital's
+L'Hospital
+l'Hospital's
+L'Hospital's
+l'Hôpital
+l'Hôpital's
+L'Hôpital's
+labelsep
+laborlawtalk
+lan
+Lapack
+laplace
+Laplace's
+laplacian
+laurent
+LCHandout
+ld
+le
+leastsq
+Lebedev
+Leibnitz
+Leibnitz's
+Lejeune
+lenny
+Leonhard
+lesssim
+letexec
+Lf
+lhopital
+libmpfi
+libpoppler
+Libpoppler's
+Lightfoot
+lightgray
+lim
+Lindgren
+linearalgebra
+linecolor
+linestyle
+linewidth
+lintian
+literal's
+LJDRP
+ll
+lllcl
+lmodern
+ln
+localscalebox
+Lodovico
+logdef
+logica
+logische
+lor
+Lothar
+lppl
+lpr
+LUI
+Luitzen
+LUP
+mA
+Maclaurin
+MacTutor
+Magnus
+mAI
+maintenv
+majorization
+majorize
+majorizes
+majorizing
+makeindex
+manpage
+manu
+marginalizing
+Mascheroni
+mastersthesis
+mathbb
+mathbios
+mathemat
+Mathematica
+Mathematik
+mathworld
+mC
+mcs
+mD
+mdclxvi
+mDI
+mdots
+Melc
+Melcón
+Melissus
+menschlichen
+meromorphic
+metadefinition
+metadual
+metaduality
+metasyntactic
+meth
+mf
+mfd
+mG
+mgilbert
+mH
+mI
+Microelectronic
+Mifflin
+milliamps
+min
+Mineola
+minimalistically
+minorization
+misc
+Mish
+miskerning
+mislinking
+misnotation
+Mittra
+mj
+mk
+mK
+mkdiff
+mKI
+mkorig
+mL
+mLI
+mn
+mni
+Moivre
+Moivre's
+monophthongal
+Mosig
+Mosig's
+Moskowitz
+mP
+mPDLUI
+mpfi
+mPI
+mT
+MTC
+mtxalg
+mtxinv
+mU
+mUI
+mul
+multidigit
+multiline
+multisource
+multitarget
+Müller
+mv
+mwtang
+mx
+myk
+myN
+myrad
+myscale
+myt
+mytn
+mz
+nA
+na
+nAI
+narg
+nasa
+Naturwissenschaft
+Navier
+Nayfeh
+nB
+nC
+neg
+nen
+nes
+newadvent
+nF
+nf
+nG
+ni
+Niccol
+Niccolò
+Niels
+nint
+nj
+nk
+NMU
+NMUer
+NMUs
+nn
+Noetics
+Noë
+nonaccelerating
+nonanalytic
+nonassociative
+nonassociativity
+nonaxiomatic
+noncommutivity
+nonconjugate
+nonconvergence
+Nonconvergent
+noncorner
+nondegenerate
+nondiagonalizability
+nondiagonalizable
+nondiscerning
+nonentire
+nonethemore
+nonexchanges
+Nongeometrical
+nonintegral
+noninterchanges
+nonintuitives
+noninvertibility
+noninvertible
+nonisotropic
+nonobvious
+nonoptimal
+nonoverdetermined
+nonplanar
+nonradical
+nonradicals
+nonrectangular
+nonredundant
+nonrepeating
+nonsquare
+nontrivially
+nonunderdetermined
+nonunique
+nonunit
+nonvector
+nonzeros
+normdist
+northwestward
+nos
+Nostrand
+noth
+nounal
+nP
+nPDLUKS
+nPDLUU
+npr
+nQ
+NR
+nSI
+nT
+nTI
+ntu
+numeralize
+Nussbaum
+nwh
+Oaxtepec
+Ockham's
+od
+Offline
+oH
+oI
+Oneworld
+oo
+opital
+opital's
+ops
+org
+orienteering
+orig
+ORIGTARGZ
+orthogonalize
+orthogonalizes
+orthogonalizing
+orthographically
+orthonormality
+orthonormalization
+orthonormalize
+orthonormalized
+orthonormalizes
+orthonormalizing
+orthonormally
+outlier
+Ouvres
+overappreciated
+overdetermine
+overdetermined
+oW
+P'D'L'U'I
+p'i
+p'q
+Papoulis
+paraboloids
+parochiality
+parseval
+Parseval's
+pdf
+pdfetex
+PDFLaTeX
+PDFs
+PDFTricks
+PDLU
+PDLUI
+PDLUKS
+PDLUKSI
+Pedersen
+Penrose
+peri
+permutor
+permutor's
+permutors
+perp
+perpendicular's
+perpendicularity
+perspectived
+Pfufnik
+PGF
+pgn
+phasor
+phdthesis
+phen
+Philos
+philosophica
+Philosophie
+pino
+Pinter's
+Piperno
+Piscataway
+pj
+pkg
+pl
+planetm
+PlanetMath
+planetmath
+plato
+plotpoints
+pm
+pointwise
+Polimetrica
+polyderiv
+polyderivz
+poppler
+PostScript
+postulational
+Poynting
+Pp
+pq
+pre
+Precalculus
+predeclared
+preëstablished
+preëxisting
+preëxists
+pref
+preloads
+prepublished
+preshifted
+primeprf
+prob
+proddiv
+proëmial
+professional's
+professionalesque
+ps
+pseudoinverse
+pseudoinversion
+psinv
+psselect
+pst
+pstricks
+PSTricks
+psutils
+pt
+PTR
+purec
+px
+pythag
+Q'H
+Q'R
+qp
+Qq
+QR
+QRinv
+qs
+qu
+quadeq
+quartic
+quartic's
+quartics
+quasielementaries
+quasielementary
+quasiëmpiricist
+quasijustification
+QUDS
+quintic
+Qw
+rA
+rad
+rAI
+Raj
+randomish
+Randyn
+Raphson
+Raton
+Rayleigh's
+rB
+rC
+rcccl
+rcccll
+rCI
+rcl
+rclcl
+rclcrcl
+rclcrclcrcl
+rcr
+rcrccccl
+rcrcl
+rcrcrcl
+README
+reassociate
+Reassociating
+RECT
+redacted
+redaction
+Redlin
+reductio
+reënters
+reëxamination
+reinitialization
+RelSteps
+Renate
+reorienting
+reorients
+rereasoned
+rerotation
+rescale
+Rescripted
+Resnick
+resolvent
+retypeset
+REU
+REUPapers
+reversibly
+Revol
+Rewove
+rfakeroot
+rG
+rgen
+Rhees
+rI
+rigueur
+rj
+rK
+rKH
+rKI
+rKS
+rKSI
+rm
+Roessler
+Rosenkrantz
+Rouillier
+Royale
+rr
+rR
+rrr
+rrrr
+rS
+rSA
+rT
+rTI
+rU
+Rugen
+ruN
+S'X
+Sa
+Sadiku
+Saleem
+sarge
+Sarnow
+SAS
+scalarization
+scalarize
+scalebox
+Schaum's
+schematical
+Schlimm
+Scholz
+schur
+schwarz
+SCons
+sed
+Sedra
+semiconvergent
+Sep
+sF
+sf
+sg
+sG
+Shanhe
+Shenk
+Shilov
+Shilov's
+Shirer
+showFouriCorrelation
+sid
+sid's
+Sieg
+Sii
+Silverman's
+Siméon
+sinarg
+sinc
+sinh
+sinint
+sinq
+sion
+Sivagnanam
+smcvt
+Smedegaard
+Smythies
+snoitavired
+solenoidal
+Sommerfeld
+sosmath
+specf
+specif
+sph
+SPHER
+sq
+src
+Srin
+Srinivasan
+SRW
+Ss
+SSA
+SSS
+stairstep
+stairsteps
+stanford
+stat
+stdev
+Stegun
+Stepney
+ster
+Stiefel
+stit
+stochastics
+stokesthm
+storable
+stormclouds
+Stratton
+Stroustrup
+struct
+Strutt
+StTh
+Sturler
+subdiagonals
+subdir
+subdisciplines
+subdomain
+subgrid
+subgrid's
+subgridded
+subimposed
+subimposes
+subimposition
+submatrices
+submatrix
+subrogate
+subseries
+subsubsection
+subsubset
+sudo
+summa
+summational
+sumworthiness
+superdiagn
+susan
+SVD
+SVDinv
+SVDr
+sx
+symbology
+Tait
+tal's
+taries
+Tartaglia
+Tartaglia's
+tary
+Tata
+taylor
+Tdefadd
+Tdefsc
+Tdefxchg
+te
+techreport
+ters
+teTeX
+tetex
+Teubner
+tex
+TeXLive
+texlive
+TeXlive
+tf
+thb
+Theo
+Theodor
+Theorie
+thopman
+TikZ
+tion
+tk
+tl
+tla
+tlb
+tlc
+tld
+Toader
+toc
+Toscano
+train's
+Trans
+Transcendenten
+transform's
+triangleq
+triangulars
+trigonometrics
+trigs
+trop
+truncator
+truncators
+Tt
+Tullio
+turN
+TW
+twopi
+txt
+Tymoczko
+Tymoczko's
+ua
+ub
+uc
+uchicago
+UDS
+uf
+ugent
+UI
+uk
+ul
+un
+unblockaded
+uncalculatable
+unconfused
+unconservative
+uncontrived
+und
+underappreciated
+underdetermined
+undergird
+undifferentiability
+unedifying
+unfundamental
+unintegrated
+unitarily
+unitless
+unitlessly
+Univ
+unmix
+unnecessity
+unpermuted
+unpersuaded
+unpolished
+unreadably
+unremarkably
+unrigorous
+unrooted
+unsuggestive
+unsureness
+unswapped
+untainted
+Untersuchung
+untersuchungen
+unverifiable
+unvisualizable
+unwarrantedly
+uoguelph
+URL
+usepackage
+usr
+UT
+utah
+utm
+Uu
+uv
+über
+Vallejo
+vanAtten
+vanBladel
+vari
+vcalc
+VD
+vdVorst
+Veit
+verdate
+Vieta
+Vieta's
+Viète
+von
+Vorlesungen
+Vorst
+vspace
+Vt
+vv
+WADavis
+wave's
+Weierstrassian
+Weisstein
+Weisstein's
+werke
+Werror
+Westin
+weyl
+Weyl's
+wg
+wh
+wheth
+Wikimedia
+wikip
+wikipedia
+Wikipedians
+Wilbraham
+Wilfried
+WTAng
+Ww
+www
+x'y'z
+x'z'y
+XA
+xform
+xg
+xkey
+xkeyval
+xkvltxp
+xkvtxhdr
+xkvview
+xplora
+XRef
+xs
+xt
+xunit
+XY
+xz
+y'x'z
+y'z'x
+Yan
+york
+ys
+yt
+yunit
+Yy
+z'x'y
+z'y'x
+Zandt's
+zählen
+zcat
+ze
+Zermelo
+ZFC
+zur
+Zz
diff -pruN 0.53.20120414-2/debian/helper/extendchangelog 0.56.20180123.1-2/debian/helper/extendchangelog
--- 0.53.20120414-2/debian/helper/extendchangelog	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/extendchangelog	2017-05-09 16:41:27.000000000 +0000
@@ -0,0 +1,86 @@
+#! /usr/bin/perl
+use warnings;
+use strict;
+use integer;
+use FindBin;
+use lib $FindBin::RealBin;
+use Def;
+
+# This script adds empty new entries at the changelogs' tops.
+
+our $usage = <;
+ close CL;
+}
+if ( $do_cld ) {
+ open CL, '<', $changelog_deb;
+ @cld = ;
+ close CL;
+}
+
+# Add changelog entries.
+my $ver = "${Def::basever}.$verdate";
+my $ver_deb = "${ver}1";
+unshift @cl , < $date
+
+END
+unshift @cld, < $date
+
+END
+
+# Write the changelogs out.
+if ( $do_cl ) {
+ open CL, '>', $changelog;
+ print CL @cl;
+ close CL;
+}
+if ( $do_cld ) {
+ open CL, '>', $changelog_deb;
+ print CL @cld;
+ close CL;
+}
+
diff -pruN 0.53.20120414-2/debian/helper/FilesTex 0.56.20180123.1-2/debian/helper/FilesTex
--- 0.53.20120414-2/debian/helper/FilesTex	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/FilesTex	2018-01-23 00:00:00.000000000 +0000
@@ -0,0 +1,28 @@
+main
+sphere
+pref
+intro
+alggeo
+trig
+drvtv
+cexp
+noth
+integ
+taylor
+inttx
+cubic
+matrix
+gjrank
+mtxinv
+eigen
+vector
+vcalc
+fours
+fouri
+prob
+stub
+conclu
+hex
+greek
+purec
+hist
diff -pruN 0.53.20120414-2/debian/helper/ispellall 0.56.20180123.1-2/debian/helper/ispellall
--- 0.53.20120414-2/debian/helper/ispellall	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/ispellall	2017-05-09 17:13:17.000000000 +0000
@@ -0,0 +1,13 @@
+#! /bin/bash -e
+
+# Run this to check all appropriate source files with ispell, using (and
+# adding to) the local development dictionary.
+
+D=$( dirname $0 )
+ispell -x -p $D/dict \
+ $D/../../tex/*.{tex,bib} \
+ $D/../../doc/* \
+ $D/../../debian/{changelog,control,copyright} \
+ $D/../../debian/helper/RelSteps \
+ $( find $D/../.. -name README )
+
diff -pruN 0.53.20120414-2/debian/helper/letexec 0.56.20180123.1-2/debian/helper/letexec
--- 0.53.20120414-2/debian/helper/letexec	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/letexec	2017-05-09 16:43:43.000000000 +0000
@@ -0,0 +1,14 @@
+#! /bin/bash -e
+
+# The .diff.gz patch format does not preserve executable flags on files.
+# This is fine, but run the present script as
+#
+# bash debian/helper/letexec
+#
+# after unpacking the source to let the appropriate helpers be
+# executable.
+
+for A in $( find $( dirname $0 ) -mindepth 1 -maxdepth 1 -type f ) ; do
+ sed -ne '/^#!/q0;q1' $A && chmod a+x $A
+done
+
diff -pruN 0.53.20120414-2/debian/helper/README 0.56.20180123.1-2/debian/helper/README
--- 0.53.20120414-2/debian/helper/README	1970-01-01 00:00:00.000000000 +0000
+++ 0.56.20180123.1-2/debian/helper/README	2017-05-09 16:50:19.000000000 +0000
@@ -0,0 +1,35 @@
+
+The files in this directory are development helpers
+only, probably of interest to you only if you are
+developing the package. You do not need these files to
+build, install or use the package, nor strictly do you
+need them even to modify the package. In fact you could
+delete the entire directory without ill effect. If
+modifying the package, however, you may optionally find
+some of the scripts here convenient.
+
+Several of the files are scripts. You can "bash
+letexec" to make these executable.
+
+The file helper/dict is a local dictionary for ispell.
+You can use it with "ispell x p helper/dict
+tex/*.{tex,bib}", for example.
+
+The file helper/vim contains nothing but some macros the
+author likes to use when editing the book with "vim S
+helper/vimmacros tex/foo.tex". It is supposed that
+most people probably prefer making their own little
+editor macros (for their own favorite editors); so,
+unless you like using other people's editor macros for
+some reason, you can ignore the file.
+
+Because these files are just development helpers, they
+are only minimally documented. The comments at or near
+the head of the file explain what each file is and what
+it is for. Refer to the various files for further
+information.
+
+If this helper/ directory interests you, then so
+probably also will the script debian/mkorig and
+the 'orig' and 'origforce' targets in debian/rules.
+
diff pruN 0.53.201204142/debian/helper/RelSteps 0.56.20180123.12/debian/helper/RelSteps
 0.53.201204142/debian/helper/RelSteps 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/RelSteps 20190105 00:00:00.000000000 +0000
@@ 0,0 +1,172 @@
+
+
+
+DEVELOPMENT AND RELEASE STEPS
+
+
+
+Although one need not develop Derivations in a Debian GNU/Linux
+environment, the authorwho happens to be a Debian user and
+developerdoes so. This file documents some of the author's
+development practices in that context. (The file does not carefully
+distinguish between actions the author takes as upstream developer and
+actions he takes as Debian developer, though the two roles are indeed
+logically distinct.)
+
+In all the file,
+
+ * $D1 represents the top development directory;
+ for example, ~/der/derivations0.5.20070322.
+ * $DW represents the top development directory before it has been
+ assigned an actual revision date (see $TW below).
+ * $DL represents the top development directory of the last revision.
+ * $DP represents $D1/.., $DW/.. and $DL/.., the parent
+ of the top development directory, as ~/der.
+ * $T1 represents the revision date; for example 20070322.
+ * $TW represents the working revision date, inasmuch as the actual
+ revision date normally is not known until release time. (For this
+ date, the author usually but not always chooses the next day not a
+ Sunday following the actual date of the last revision.)
+ * $DSC represents the Debian .dsc file, where the needed Debian source
+ package files in $DP are as
+
+ derivations_0.5.20070322.orig.tar.gz
+ derivations_0.5.200703221.diff.gz
+ derivations_0.5.200703221.dsc
+
+ * $DIFFGZ and $ORIGTARGZ represent the .diff.gz and .orig.tar.gz
+ as above.
+ * $DEB represents the Debian binary package,
+ as derivations_0.5.200703221_all.deb.
+
+The file naturally is not a complete set of instructions, being rather
+notes to jog the author's memory. For example, the file does not
+explain what the command "view foo" is for, though of course it is for
+the author to review foo to ensure that all is in order, making
+corrections if necessary. However, there is probably enough information
+here to be of use to a future developer.
+
+
+BEGINNING DEVELOPMENT OF A NEW REVISION
+
+
+The author opens development of a new revision of the book by
+approximately the following steps.
+
+1. Optionally, or if $DL does not exist,
+
+ cd $DP
+ rm R $DL  true
+ dpkgsource x $DSC
+ cd $DL
+ bash debian/helper/letexec
+ cd ..
+
+2. Give the commands
+
+ cd $DL
+ fakeroot debian/rules clean
+ cd ..
+ cp ai $DL $DW
+ cd $DW
+ debian/helper/extendchangelog
+ debian/helper/updatedate $TW
+
+If there are Quilt patches, then the author probably integrates them
+into the source, after which he probably runs debian/helper/rmquilt
+to deQuilt the source.
+
+
+RELEASING
+
+
+The author closes development of a new revision of the book and prepares
+it for release by approximately the following steps.
+
+1. Decide on an official revision date, then
+
+ cd $DP
+ mv i $DW $D1
+ cd $D1
+ debian/helper/updatedate $T1
+ fakeroot debian/rules clean
+ debian/helper/buffesrc
+
+2. Repeatedly give the following commands, making corrections as
+indicated, until no further corrections remain to be made:
+
+ debian/helper/ispellall
+ make clean tex/check  less
+ fakeroot debian/rules clean
+ debian/helper/buffesrc
+
+During the "make tex/check", index entries can overfill their margins
+by any amount less than 9.0pt. Where necessary (hopefully not often),
+any page can be less than 2.0pt too tall. (These limits may be too
+strait, but experience has not yet asked the author to judge looser
+limits; so, these are the limits to enforce for now.)
+
+3. Finalize doc/changelog and debian/changelog with the following
+commands. Change the distribution in debian/changelog from experimental
+to unstable if appropriate.
+
+ cd ..
+ diff turN $DL $D1 >der.diff
+ grep B3 A3 '19\(69\70\)' der.diff
+ view der.diff
+ cd $D1
+ vim doc/changelog
+ vim debian/changelog
+ fakeroot debian/rules clean
+ debian/helper/buffesrc
+ cd ..
+ diff turN $DL $D1 >der.diff
+ grep B3 A3 '19\(69\70\)' der.diff
+ cd $D1
+
+4. Optionally,
+
+ fakeroot debian/rules cleandebforce
+
+5. Because the author actually uses the Debianized source to develop,
+create a nonDebianized upstream source by
+
+ fakeroot debian/rules orig
+
+6. Omitting the uc and us if it is wanted actually digitally to sign
+the sources, build source and binary package files by
+
+ dpkgbuildpackage rfakeroot nosign
+ fakeroot debian/rules clean
+ cd ..
+
+7. Optionally review the .debian.tar.xz to ensure that it actually
+contains the Debianization as it ought.
+
+8. Give the command
+
+ sudo dpkg i $DEB
+
+9. Check that it has seemed to install correctly.
+
+10. Optionally upload.
+
+
+
+For lack of a better place to note it, it is here noted that the author
+might check the following on completing a chapter:
+
+ * /\~\$ (short math)
+ * /\(^\[[:space:]]\)\$ (long math)
+ * /\$\('s\th\)\([^az]\$\) (no tied *'s or *th)
+ * /}[,.?;:!] (\emph{text[,.?;:!]})
+ * /\\\\ (no \\ at the end of an array or eqnarray)
+ * /\(^\[[:space:]]\)(\\ref{ (no untied eqn references)
+ * /^[[:space:]]*\\emph (no \emph inadvertently in place of \index)
+ * /\\index.*\![[:space:]] (no space following `!' in \index)
+ * /bad break (bad breaks are in fact bad)
+
+And here is noted the command to scan TeX files in book order:
+
+for A in $(<../debian/helper/FilesTex); do B="$A.tex"; grep H 'PATTERN' $B; done
+
diff pruN 0.53.201204142/debian/helper/rmquilt 0.56.20180123.12/debian/helper/rmquilt
 0.53.201204142/debian/helper/rmquilt 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/rmquilt 20190105 00:00:00.000000000 +0000
@@ 0,0 +1,14 @@
+#! /bin/bash e
+
+# Run this script to deQuilt the source, deleting
+# within the source the files and directories Quilt
+# makes, keeps and uses. Note that the script does not
+# integrate the patches it removes; it just removes
+# them. Therefore, if you wish to integrate any of the
+# patches, then you should probably do that before
+# invoking the script.
+
+D="$( dirname $0 )"
+cd "$D/../.."
+rm rfv .pc debian/patches
+
diff pruN 0.53.201204142/debian/helper/updatedate 0.56.20180123.12/debian/helper/updatedate
 0.53.201204142/debian/helper/updatedate 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/updatedate 20170509 16:45:56.000000000 +0000
@@ 0,0 +1,219 @@
+#! /usr/bin/perl
+use warnings;
+use strict;
+use integer;
+use FindBin;
+use lib $FindBin::RealBin;
+use Def;
+
+# Before running this script, you should add a changelog entry (even an
+# empty one) for the current version, if you've not yet done so. The
+# script modifies whichever entry happens to be at the changelog's top.
+#
+# This helper script automatically updates the package's issue date in
+# several files (the files are named in the following block). The
+# script accepts date and time in any format the date(1) command
+# understands, but it writes the date (and time) to each file in that
+# file's appropriate format. Example usage:
+#
+# $ updatedate '24 Jan 2005 21:00 +0000'
+#
+# If no time of day is given, it defaults to 00:00:00 +0000.
+#
+# When also running the helper/updatever script, you probably want to
+# run that script first, then this one. Otherwise debian/changelog is
+# updated in a different manner than you probably thought to update it.
+#
+# Note that the script does not change the name of the toplevel source
+# directory. This is for sanity's sake. You must change the name of
+# the directory yourself.
+#
+# Note also that the self entry in bib.bib, where the book refers to
+# itself, must appear first to run this script. If something else
+# appears first, the script may happily try to update that date,
+# instead.
+
+our $debian = "${FindBin::RealBin}/../../debian" ;
+our $maintex = "${FindBin::RealBin}/../../tex/${Def::main0}.tex";
+our $mainbib = "${FindBin::RealBin}/../../tex/${Def::bib0}.bib" ;
+our $manpage =
+ "${FindBin::RealBin}/../../doc/${Def::out}.${Def::mansect}" ;
+our $readme = "${FindBin::RealBin}/../../${Def::name_readme}" ;
+our $changelog = "${FindBin::RealBin}/../../doc/changelog" ;
+our $changelog_deb = "${debian}/changelog" ;
+our $copyright = "${debian}/copyright" ;
+
+my $warn_msg = "$0: cannot find date line in ";
+
+our $usage = <;
+ @bib = ;
+ @man = ;
+ @rme = ;
+ @cr = ;
+ close MAIN;
+ close BIB ;
+ close MAN ;
+ close RME ;
+ close CR ;
+}
+if ( $do_cl ) {
+ open CL , '<', $changelog ;
+ @cl = ;
+ close CL ;
+}
+if ( $do_deb ) {
+ open CLD , '<', $changelog_deb;
+ @cld = ;
+ close CLD ;
+}
+
+if ( $do_main ) {
+
+ # Update the dates in the main TeX source file.
+ $_ = 0; ++$_ until $_ > $#main 
+ $main[$_] =~
+ s/^(\\newcommand\{\\veryear\}\{)([^\{\}]+)(\})\s*?$/$1$year$3/;
+ $_ <= $#main or warn "$warn_msg$maintex\n";
+ $_ = 0; ++$_ until $_ > $#main 
+ $main[$_] =~
+ s/^(\\newcommand\{\\verdate\}\{)([^\{\}]+)(\})\s*?$/$1$date$3/;
+ $_ <= $#main or warn "$warn_msg$maintex\n";
+
+ # Update the date in the bibliography's (leading) selfentry.
+ $_ = 0; ++$_ until $_ > $#bib 
+ $bib[$_] =~ s/^(\s*year=\{)([^\{\}]+)(\},)\s*?$/$1$date$3/;
+ $_ <= $#bib or warn "$warn_msg$mainbib\n";
+
+ # Update the dates on the manpage.
+ $man[0] =~ s/(")([^"]*?)("\s*\\)$/$1$date$3/
+ or warn "$warn_msg$manpage\n";
+ $_ = $#man; $_ until $_ < 0 
+ $man[$_] =~ s/^(Copyright \(C\) \d+\\)(\d+)/$1$year/;
+ $_ >= 0 or warn "$warn_msg$manpage\n";
+
+ # Update the date at the foot of the readme and the copyright year
+ # therein.
+ $_ = $#rme; $_ until $_ < $#rme1 
+ # $rme[$_] =~ s/^(.*, )([^,\n]*,[^,\n]*)$/$1$cld/;
+ $rme[$_] =~ s/^.*\S.*$/$cld/;
+ $_ >= $#rme1 or warn "$warn_msg$readme\n";
+ $_; $_ until $_ < 0 
+ $rme[$_] =~ s/^(Copyright \(C\) \d+)(\d+)/$1$year/;
+ $_ >= 0 or warn "$warn_msg$readme\n";
+
+ # Update the year in the copyright file.
+ $_ = 0; ++$_ until $_ > $#cr 
+ $cr[$_] =~ s/^(Copyright \(C\) \d+)(\d+)/$1$year/;
+ $_ <= $#cr or warn "$warn_msg$copyright\n";
+
+}
+
+if ( $do_cl ) {
+
+ # Update the dates in the main changelog.
+ unless ( $opt{s} ) {
+ $cl[0] =~ s/(\([^()]*\.)(\d+)([^().]*?\))/$1$verd$3/
+ or warn "$warn_msg$changelog\n";
+ }
+ {
+ my $authe = "${Def::author} <${Def::email}>";
+ $_ = 0; ++$_ until $_ > $#cl 
+ $cl[$_] =~ s/^(  $authe )(\S(?:.*?\S)??)\s*$/$1$cld\n/;
+ $_ <= $#cl or warn "$warn_msg$changelog\n";
+ }
+
+}
+
+if ( $do_deb ) {
+
+ # Update the dates in the debian/changelog.
+ unless ( $opt{s} ) {
+ # Update the version date atop debian/changelog.
+ $cld[0] =~ s/(\([^()]*\.)(\d+)([^().]*?\))/$1$verd$3/
+ or warn "$warn_msg$changelog_deb\n";
+ }
+ {
+ my $authe = "${Def::author} <${Def::email_deb}>";
+ $_ = 0; ++$_ until $_ > $#cld 
+ $cld[$_] =~ s/^(  $authe )(\S(?:.*?\S)??)\s*$/$1$cld\n/;
+ $_ <= $#cld or warn "$warn_msg$changelog_deb\n";
+ }
+
+}
+
+# Write updated files out.
+if ( $do_main ) {
+ open MAIN, '>', $maintex ;
+ open BIB , '>', $mainbib ;
+ open MAN , '>', $manpage ;
+ open RME , '>', $readme ;
+ open CR , '>', $copyright ;
+ print MAIN @main;
+ print BIB @bib ;
+ print MAN @man ;
+ print RME @rme ;
+ print CR @cr ;
+ close MAIN;
+ close BIB ;
+ close MAN ;
+ close RME ;
+ close CR ;
+}
+if ( $do_cl ) {
+ open CL , '>', $changelog ;
+ print CL @cl ;
+ close CL ;
+}
+if ( $do_deb ) {
+ open CLD , '>', $changelog_deb;
+ print CLD @cld ;
+ close CLD ;
+}
+
diff pruN 0.53.201204142/debian/helper/updatedebver 0.56.20180123.12/debian/helper/updatedebver
 0.53.201204142/debian/helper/updatedebver 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/updatedebver 20170509 16:46:12.000000000 +0000
@@ 0,0 +1,61 @@
+#! /usr/bin/perl
+use warnings;
+use strict;
+use integer;
+use FindBin;
+use lib $FindBin::RealBin;
+use Def;
+
+# This script adds or updates the Debian package version number at the
+# head of debian/changelog.
+
+our $usage = <;
+close CL;
+
+# Update the changelog's first line.
+$cld[0] =~
+ s/^(${Def::out} \(${Def::basever}\.\d+)((?:\d+)?)(\))/$1$vdstr$3/
+ or die "$0: can't parse the first line of debian/changelog\n";
+
+# Write the changelog out.
+open CL, '>', $changelog_deb;
+ print CL @cld;
+close CL;
+
diff pruN 0.53.201204142/debian/helper/verifyasciiall 0.56.20180123.12/debian/helper/verifyasciiall
 0.53.201204142/debian/helper/verifyasciiall 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/verifyasciiall 20170520 14:35:15.000000000 +0000
@@ 0,0 +1,35 @@
+#! /bin/bash e
+
+# Run this on a cleaned source to verify that no characters anywhere appear
+# but these: ascii graphables; space; unix newline. Allow however a
+# few appropriate exceptions such as leading tabs in Makefiles.
+
+D="$( dirname $0 )"
+cd "$D/../.."
+STATUS=0
+for A in $( find . ); do
+ if [ "$A" != './debian/helper/dict' ] && [ "$A" != './debian/changelog' ]; then
+ B="$( basename $A )";
+ IS_MAKEFILE=0
+ if { echo "$B"  sed nre '/^(Makefile(.*)?rules)$/q0;q1'; }; then
+ IS_MAKEFILE=1
+ fi
+ A="$A" IS_MAKEFILE="$IS_MAKEFILE" perl <"$A" \
+ e 'use warnings;' \
+ e 'use strict;' \
+ e 'use integer;' \
+ e 'my $status = 0;' \
+ e 'while (<>) {' \
+ e ' chomp;' \
+ e ' s/^\t// if $ENV{IS_MAKEFILE};' \
+ e ' if (/[^\040\176\n]/) {' \
+ e ' $status = 1;' \
+ e ' warn "$ENV{A}:$.:$_\n";' \
+ e ' }' \
+ e '}' \
+ e 'exit $status;' \
+  STATUS=1
+ fi
+done
+exit $STATUS
+
diff pruN 0.53.201204142/debian/helper/vimmacros 0.56.20180123.12/debian/helper/vimmacros
 0.53.201204142/debian/helper/vimmacros 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/helper/vimmacros 20180116 17:13:55.000000000 +0000
@@ 0,0 +1,7 @@
+map :w:!make derivations.ps
+imap ~(\ref{})
+map Oa \begin{split}\end{split}^
+se tw=71
+se ic
+se et
+se ai
diff pruN 0.53.201204142/debian/mkorig 0.56.20180123.12/debian/mkorig
 0.53.201204142/debian/mkorig 20120218 10:02:57.000000000 +0000
+++ 0.56.20180123.12/debian/mkorig 20170509 16:20:04.000000000 +0000
@@ 36,19 +36,22 @@ if [[ $HELP == 1 ]] ; then
usage
else
 # Ensure that the target .orig does not already exist.
+ # Ensure that the target .orig.tar.xz does not already exist.
DIRNAME=$( dirname $PWD )
BASENAME=$( basename $PWD )
 ORIG=$DIRNAME/$BASENAME.orig
 if [[ $FORCE == 0 && e $ORIG ]] ; then
+ BASENAME_UNDERSCORE=$( echo $BASENAME  sed re 's/^(.*)/\1_/' )
+ ORIG_TARBALL=$DIRNAME/$BASENAME_UNDERSCORE.orig.tar.xz
+ if [[ $FORCE == 0 && e $ORIG_TARBALL ]] ; then
echo 1>&2 "$0: $ORIG already exists (use f to force)"
exit 1
fi
# Clean the source and prepare the target.
fakeroot debian/rules clean >/dev/null
 rm Rf $ORIG
 mkdir $ORIG
+ rm Rf $ORIG_TARBALL
+ TEMP_DIR=$(mktemp d)
+ TEMP_ORIG=$TEMP_DIR/$BASENAME
+ mkdir p $TEMP_ORIG
# Copy to the target.
for A in $( find . mindepth 1 maxdepth 1 ) ; do
@@ 58,9 +61,16 @@ else
[[ $( basename $A ) == ${DEBONLY[$I]} ]] && ISORIG=1
done
if [[ $ISORIG == 0 ]] ; then
 cp a $A $ORIG/$BNA
+ cp a $A $TEMP_ORIG/$BNA
fi
done
+ # Zip up the target.
+ tar C $TEMP_DIR cJf $TEMP_DIR/$BASENAME_UNDERSCORE.orig.tar.xz $BASENAME
+
+ # Place the target.
+ mv $TEMP_DIR/$BASENAME_UNDERSCORE.orig.tar.xz $DIRNAME/
+ rm rf $TEMP_DIR
+
fi
diff pruN 0.53.201204142/debian/patches/build_fixes 0.56.20180123.12/debian/patches/build_fixes
 0.53.201204142/debian/patches/build_fixes 20120218 10:21:00.000000000 +0000
+++ 0.56.20180123.12/debian/patches/build_fixes 19700101 00:00:00.000000000 +0000
@@ 1,75 +0,0 @@
diff u derivations0.52.20100310/debian/changelog derivations0.52.20100310/debian/changelog
 derivations0.52.20100310.orig/btool/Makefileoptim
+++ derivations0.52.20100310/btool/Makefileoptim
@@ 1,2 +1,2 @@
 optim := O2
werror := $(if $(BUILD_FOR_PACKAGING), , Werror)
+#werror := $(if $(BUILD_FOR_PACKAGING), , Werror)
only in patch2:
unchanged:
 derivations0.52.20100310.orig/btool/Makefile
+++ derivations0.52.20100310/btool/Makefile
@@ 33,16 +33,16 @@
 allobj: $(allobj)
 $(foreach dir, $(srcdir), $(dir)/%): FORCE; $(MAKE) C $(@D) $(@F)
 %.d: %.cc; g++ MM $<  sed e 's/:/ $*.d:/' >$@
%.o:; g++ $(warn) $(optim) c $< o $*.o
+%.o: %.cc; g++ $(warn) $(optim) c $< o $*.o

 completepdf: completepdf.o $(allobj)
 g++ $(warn) $(optim) lpoppler $^ o $@
+ g++ $(warn) $(optim) $^ lpoppler o $@

 romanize: romanize.o Util/roman_numeral.o
 g++ $(warn) $(optim) $^ o $@

 a.out: test.o $(allobj)
 g++ $(warn) $(optim) lpoppler $^ o $@
+ g++ $(warn) $(optim) $^ lpoppler o $@

 cleanless:
 $(foreach dir, $(srcdir), $(MAKE) C $(dir) clean ;)
only in patch2:
unchanged:
 derivations0.52.20100310.orig/btool/Util/TeX_atom.cc
+++ derivations0.52.20100310/btool/Util/TeX_atom.cc
@@ 76,11 +76,11 @@
 }

 void Util::TeX_atom_nonterminal::init(
 vector ::const_iterator p,
 const vector::const_iterator end
+ vector ::const_iterator p,
+ const vector::const_iterator end
 ) {
 int level = 0;
 vector::const_iterator q = end;
+ vector::const_iterator q = end;
 for ( ; p != end; ++p ) {
 if ( *p == "{" ) {
 if ( !level ) q = p+1;
@@ 114,20 +114,20 @@
 Util::TeX_atom_nonterminal::TeX_atom_nonterminal(
 const string &line
 ) {
 vector tokens;
+ vector tokens;
 tokenize_TeX( line, &tokens, TRANSLATE_NOBREAKSPACE );
 init( tokens.begin(), tokens.end() );
 }

 Util::TeX_atom_nonterminal::TeX_atom_nonterminal(
 const vector &tokens
+ const vector &tokens
 ) {
 init( tokens.begin(), tokens.end() );
 }

 Util::TeX_atom_nonterminal::TeX_atom_nonterminal(
 const vector::const_iterator begin,
 const vector::const_iterator end
+ const vector::const_iterator begin,
+ const vector::const_iterator end
 ) {
 init( begin, end );
 }
diff pruN 0.53.201204142/debian/patches/cxxflags 0.56.20180123.12/debian/patches/cxxflags
 0.53.201204142/debian/patches/cxxflags 20140126 22:03:38.000000000 +0000
+++ 0.56.20180123.12/debian/patches/cxxflags 19700101 00:00:00.000000000 +0000
@@ 1,34 +0,0 @@
author: Pino Toscano
description: automatically support poppler api changes

 a/btool/Makefile 20140126 21:56:15.859514804 +0000
+++ b/btool/Makefile 20140126 21:56:15.859514804 +0000
@@ 33,7 +33,7 @@
 allobj: $(allobj)
 $(foreach dir, $(srcdir), $(dir)/%): FORCE; $(MAKE) C $(@D) $(@F)
 %.d: %.cc; g++ MM $<  sed e 's/:/ $*.d:/' >$@
%.o: %.cc; g++ $(warn) $(optim) c $< o $*.o
+%.o: %.cc; g++ $(warn) $(optim) $(CXXFLAGS) c $< o $*.o

 completepdf: completepdf.o $(allobj)
 g++ $(warn) $(optim) $^ lpoppler o $@
 a/btool/Makefilesubdir 20140126 21:56:15.859514804 +0000
+++ b/btool/Makefilesubdir 20140126 21:57:25.843514632 +0000
@@ 2,6 +2,8 @@
 # This makefile is meant to be used only when accessed
 # through a symbolic link from an immediate subdirectory.

+CXXFLAGS += $(shell pkgconfig cflags poppler)
+
 warn := Wall Wextra

 include ../Makefileoptim
@@ 15,7 +17,7 @@
 include $(alld)
 endif
 %.d: %.cc; g++ MM $<  sed e 's/:/ $*.d:/' >$@
%.o:; g++ $(warn) $(optim) c $< o $*.o
+%.o:; g++ $(warn) $(optim) $(CXXFLAGS) c $< o $*.o

 cleanless:
 rm fv *.d *.o *.gch a.out
diff pruN 0.53.201204142/debian/patches/fillpopplersxreftablegap 0.56.20180123.12/debian/patches/fillpopplersxreftablegap
 0.53.201204142/debian/patches/fillpopplersxreftablegap 20150531 22:45:06.000000000 +0000
+++ 0.56.20180123.12/debian/patches/fillpopplersxreftablegap 19700101 00:00:00.000000000 +0000
@@ 1,249 +0,0 @@
Description: Fill Libpoppler's new unwillingness to find the last XRef.
 Libpoppler used to report where the last XRef table was. Evidently,
 Libpoppler's developer has since decided that this to be only an internal
 capability of the library, not exposed in the public, nor even in the
 privatepublic, interface. This patch fills the gap, implementing the
 missing functionality.
 .
 derivations (0.53.201204141.2) unstable; urgency=medium
 .
 * Nonmaintainer upload.
 * Automatically support poppler api changes (closes: #690161).
  Thanks to Pino Toscano.
Author: Michael Gilbert
BugDebian: https://bugs.debian.org/690161


The information above should follow the Patch Tagging Guidelines, please
checkout http://dep.debian.net/deps/dep3/ to learn about the format. Here
are templates for supplementary fields that you might want to add:

Origin: ,
Bug:
BugDebian: https://bugs.debian.org/
BugUbuntu: https://launchpad.net/bugs/
Forwarded:
ReviewedBy:
LastUpdate:

 derivations0.53.20120414.orig/btool/Makefilesubdir
+++ derivations0.53.20120414/btool/Makefilesubdir
@@ 2,6 +2,7 @@
 # This makefile is meant to be used only when accessed
 # through a symbolic link from an immediate subdirectory.

+CXXFLAGS += std=c++11
 CXXFLAGS += $(shell pkgconfig cflags poppler)

 warn := Wall Wextra
 derivations0.53.20120414.orig/btool/PDF/PDF.cc
+++ derivations0.53.20120414/btool/PDF/PDF.cc
@@ 1,14 +1,115 @@

 #include "PDF.h"
 #include
+#include
+#include
+#include
 #include "PDF_rep.h"

 int PDF::file_length( const PDF &pdf ) {
 return pdf.rep>file_length1;
 }

int PDF::offset_last_xref_table( const PDF &pdf ) {
 return pdf.rep>xref>getEntry(0)>offset;
+int PDF::offset_last_xref_table( const std::string &pdf_filename ) {
+
+ // Update in 2015, seven years after most of the rest of the file and
+ // program were written:
+ //
+ // In an earlier version of the progam, this function's body was
+ // a oneliner:
+ //
+ // return pdf.rep>xref>getLastXRefPos();
+ //
+ // That worked fine until Libpoppler changed its interface, since
+ // which it has hidden the required function in the private section
+ // of an interface, unusable here. In a later version of the
+ // program, this function's body was a different oneliner:
+ //
+ // return pdf.rep>xref>getEntry(0)>offset;
+ //
+ // This unfortunately does the wrong thing, though, with effects
+ // Salvatore Bonaccorso has noticed and brought to attention.
+ // Accordingly, this function itself must now find the position of
+ // the last XRef table, as follows.
+ //
+ // Fortunately, the PDF standard requires the position of an XRef
+ // table to be given in plain ascii, so finding the position is not
+ // too hard. One must only be sure to find the position of
+ // the *last* XRef table.
+ //
+ // The code is not quite as elegant as it might be, but the whole
+ // program needs cleaning up, so let us not worry about that for now.
+ // (The programmer's C++ style was pretty immature back in 2008 in
+ // any case.)
+ //
+ //
+
+ const char key_token[] = "startxref";
+
+ int offset = 1;
+
+ std::ifstream pdf_file(pdf_filename);
+ bool has_preceding_whitespace = true;
+ char digit_stage[] = " ";
+ int c = std::ifstream::traits_type::eof();
+ const char *p = key_token;
+
+ while (true) {
+
+ c = pdf_file.get();
+ if (c == std::ifstream::traits_type::eof()) goto done;
+
+ if (!has_preceding_whitespace  c != *p) {
+ p = key_token;
+ has_preceding_whitespace = std::isspace(c);
+ }
+
+ else {
+
+ ++p;
+
+ if (!*p) {
+
+ // Skip whitespace between key token and offset.
+ bool has_trailing_whitespace = false;
+ while (true) {
+ c = pdf_file.get();
+ if (c == std::ifstream::traits_type::eof()) goto done;
+ if (!std::isspace(c)) break;
+ has_trailing_whitespace = true;
+ }
+
+ if (has_trailing_whitespace) {
+
+ // The key token has been found, so prepare to read the offset.
+ offset = 1;
+
+ // Read the offset.
+ if (std::isdigit(c)) {
+ digit_stage[0] = c;
+ offset = std::atoi(digit_stage);
+ while (true) {
+ c = pdf_file.get();
+ if (c == std::ifstream::traits_type::eof()) goto done;
+ if (!std::isdigit(c)) break;
+ offset *= 10;
+ digit_stage[0] = c;
+ offset += std::atoi(digit_stage);
+ }
+ }
+
+ }
+
+ p = key_token;
+
+ }
+
+ }
+
+ }
+
+ done: return offset;
+
 }

 PDF::Iref PDF::iref_catalog( const PDF &pdf ) {
 derivations0.53.20120414.orig/btool/PDF/PDF.h
+++ derivations0.53.20120414/btool/PDF/PDF.h
@@ 44,7 +44,7 @@ namespace PDF {
 struct PDF_rep;
 class PDF;
 int file_length ( const PDF &pdf );
 int offset_last_xref_table( const PDF &pdf );
+ int offset_last_xref_table( const std::string &pdf_filename );
 Iref iref_catalog ( const PDF &pdf );
 Iref iref_info ( const PDF &pdf );
 int n_obj ( const PDF &pdf );
@@ 75,7 +75,7 @@ class PDF::PDF {
 explicit PDF( const std::string &filename );
 ~PDF();
 friend int file_length ( const PDF &pdf );
 friend int offset_last_xref_table( const PDF &pdf );
+ friend int offset_last_xref_table( const std::string &pdf_filename );
 friend Iref iref_catalog ( const PDF &pdf );
 friend Iref iref_info ( const PDF &pdf );
 friend int n_obj ( const PDF &pdf );
 derivations0.53.20120414.orig/btool/PDF/update_catalog.cc
+++ derivations0.53.20120414/btool/PDF/update_catalog.cc
@@ 212,7 +212,8 @@ string PDF::add_title_to_info(
 string PDF::update_trailer(
 PDF &pdf,
 const int n_pdf_obj,
 const int offset_xref
+ const int offset_xref,
+ const std::string &pdf_filename
 ) {

 PDF_rep *const rep = pdf.get_PDF_rep(magic);
@@ 241,7 +242,7 @@ string PDF::update_trailer(
 char s_Prev[] = "Prev";
 {
 Object obj;
 obj.initInt( offset_last_xref_table(pdf) );
+ obj.initInt( offset_last_xref_table(pdf_filename) );
 new_trailer>add( s_Prev, &obj );
 }

 derivations0.53.20120414.orig/btool/PDF/update_catalog.h
+++ derivations0.53.20120414/btool/PDF/update_catalog.h
@@ 21,7 +21,8 @@ namespace PDF {
 std::string update_trailer(
 PDF &pdf,
 int n_pdf_obj,
 int offset_xref
+ int offset_xref,
+ const std::string &pdf_filename
 );
 }

 derivations0.53.20120414.orig/btool/PDF/updator.cc
+++ derivations0.53.20120414/btool/PDF/updator.cc
@@ 15,6 +15,7 @@ string PDF::updator(
 PDF &pdf,
 const Page_no::PS_page_numbering &nog,
 const TOC::Table &toc,
+ const std::string &pdf_filename,
 const string &title
 ) {

@@ 79,7 +80,8 @@ string PDF::updator(
 string trailer = update_trailer(
 pdf,
 n_obj(pdf) + outline.size(),
 file_offset
+ file_offset,
+ pdf_filename
 );

 string res;
@@ 110,6 +112,7 @@ string PDF::updator(
 pdf,
 Page_no::PS_page_numbering( filename_ps ),
 TOC::Table( filename_toc ),
+ filename_pdf,
 title
 );
 }
 derivations0.53.20120414.orig/btool/PDF/updator.h
+++ derivations0.53.20120414/btool/PDF/updator.h
@@ 19,6 +19,7 @@ namespace PDF {
 PDF &pdf,
 const Page_no::PS_page_numbering &nog,
 const TOC::Table &toc,
+ const std::string &pdf_filename,
 const std::string &title = std::string()
 );
 std::string updator(
diff pruN 0.53.201204142/debian/patches/fromstretchtowardbuster 0.56.20180123.12/debian/patches/fromstretchtowardbuster
 0.53.201204142/debian/patches/fromstretchtowardbuster 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/debian/patches/fromstretchtowardbuster 20190105 01:00:00.000000000 +0000
@@ 0,0 +1,357 @@
+Description: update from stretch toward buster
+ Upstream develops the book on a Debian stable platform, which now
+ is Debian 9 stretch. Upstream therefore supplies the book's source as
+ packaged for Debian stable. The present patch updates the source for
+ building on Debian sid and for packaging on sid toward buster.
+ .
+ It is expected that upstream will incorporate the patch after buster
+ is stable.
+ .
+ * Reconfigured and rebuilt for upload to Debian sid.
+ * Conformed btool/ to sid's Poppler, version 0.69 (closes: #884476).
+ Required at least this version of Poppler to build.
+Author: Thaddeus H. Black
+Bug-Debian: https://bugs.debian.org/884476
+Forwarded: not-needed
+Last-Update: 2019-01-05
+
+ derivations0.56.20180123.1.orig/btool/PDF/PDF.cc
++++ derivations0.56.20180123.1/btool/PDF/PDF.cc
+@@ 181,14 +181,12 @@ PDF::PDF::PDF( const std::string &filena
+ rep>trailer = obj>getDict();
+ }
+ {
+ Object obj;
+ { char s[] = "Size"; rep>trailer>lookup( s, &obj ); }
++ Object obj = rep>trailer>lookup( "Size", 0 );
+ if ( !obj.isInt() ) throw Exc_PDF();
+ rep>n_obj1 = obj.getInt();
+ }
+ {
+ rep>catalog_obj = new Object();
+ rep>xref>getCatalog( rep>catalog_obj );
++ rep>catalog_obj = new Object( rep>xref>getCatalog() );
+ if ( !rep>catalog_obj>isDict() ) throw Exc_PDF();
+ rep>catalog = rep>catalog_obj>getDict();
+ }
+@@ 197,18 +195,14 @@ PDF::PDF::PDF( const std::string &filena
+ if ( !rep>catalog2>isOk() ) throw Exc_PDF();
+ }
+ {
+ Object obj;
+ { char s[] = "Info"; rep>trailer>lookupNF( s, &obj ); }
++ Object obj( rep>trailer>lookupNF( "Info" ) );
+ if ( !obj.isRef() ) throw Exc_PDF();
+ const Ref ref = obj.getRef();
+ rep>info_iref = Iref( ref.num, ref.gen );
+ }
+ {
+ rep>info_obj = new Object();
+ rep>xref>fetch(
+ rep>info_iref.i,
+ rep>info_iref.gen,
+ rep>info_obj
++ rep>info_obj = new Object(
++ rep>xref>fetch( rep>info_iref.i, rep>info_iref.gen, 0 )
+ );
+ if ( !rep>info_obj>isDict() ) throw Exc_PDF();
+ rep>info = rep>info_obj>getDict();
+ derivations0.56.20180123.1.orig/btool/PDF/README
++++ derivations0.56.20180123.1/btool/PDF/README
+@@ 1,3 +1,42 @@
+
+ Here are source files to interpret and modify PDF.
+
++In Jan. 2019, with Debian buster then in a late
++prerelease stage of development, update_catalog.cc
++and PDF.cc were minimally but still fairly extensively
+revised to track libpoppler-private-dev, version 0.69.
++The combination of [a] layered revisions over
++decades (during which standard C++ has significantly
++advanced) to track an advertisedly unstable
++private Poppler API and [b] Thaddeus H. Black's once
++immature C++ coding style (for this code was one of the
++projects by which Thaddeus first learned C++) leaves
++the code in an interesting state, doesn't it?
++Realistically, the code will probably never be cleaned
++up. It works.
++
++Compared against version 0.48 of
+libpoppler-private-dev (issued with Debian stretch),
++version 0.69 handles its Object type differently,
++inverting the Object's manner of association to types
++like Dict. This seems to represent an improvement but
++of course such improvements will break code like the
++present code that relies upon them. For better or
++worse, one suspects that more such breaking changes
++will come. For one, types like Dict might be made to
++inherit from Object (will they? unknown, but they
++might). Derivations shall have to keep up.
++
++In case Poppler's developers or package maintainers
++read this, Thaddeus' view is that breaking changes of
++the aforementioned kind are right, though Thaddeus does
++not particularly enjoy tracking them! At some future
++date, it would be nice if the private Poppler API would
++stabilize (presumably then becoming nonprivate), but
++Poppler probably wants more development before
++that occurs. Poppler is useful software, devs. Keep
++working on it.
++
++For information, it appears that the code in the
++present directory was first introduced in Sept. 2007.
++
+ derivations0.56.20180123.1.orig/btool/PDF/update_catalog.cc
++++ derivations0.56.20180123.1/btool/PDF/update_catalog.cc
+@@ 1,5 +1,6 @@
+
+ #include "update_catalog.h"
++#include
+ #include
+ #include
+ #include
+@@ 11,6 +12,7 @@
+ #include "PDF.h"
+ #include "PDF_rep.h"
+
++using std::move;
+ using std::string;
+ typedef std::set set;
+ const int magic = 0x9f05; // deprecated
+@@ 19,6 +21,40 @@ const int magic = 0x9f05; // deprecated
+ // feed it. In an earlier Libpoppler, didn't it used to? Maybe not; I can't
+ // clearly remember. In any case, letting the keystring go out of scope now
+ // invalidates the Dict. THB, March 2010
++//
++// Well, Poppler's hardly documented private API has changed again.
++//
+// I do not criticize, of course. On the contrary, one appreciates Poppler's
++// continued development, nor would now (nor maybe ever) seem a good time for
++// Poppler's developers to document the private API in question. Premature
++// stabilization of an API can cause much trouble, after all. If and when the
++// time comes to stabilize and to document, they'll know. Meanwhile, for me at
++// least, access to Poppler's source suffices, yet the observation that little
++// privateAPI documentation exists remains an observation of fact as far
++// as I know. The fact, or at any rate the observation, incidentally affects
++// the present program as follows.
++//
++// I am unlikely to relearn Poppler's latest pattern of memory allocation with
++// each version0.* Poppler release. Maybe when 1.0 arrives? Maybe never.
++// Meanwhile, I am letting the program leak some memory. For a program of this
++// kind, whose execution time is short and resource needs are modest, such
++// leaking probably has no practical effect, for of course the kernel
++// automatically reclaims all leaked memory as soon as the program exits.
++// Still, on principle, leaking is not neat, is it?
++//
++// Where you see below a construct like
++//
++// Dict &dict = *( new Dict( static_cast(0) ) );
++//
++// the construct's purpose is to delay or prevent a destructor of Poppler's.
++// As far as I know, this is wrong, or is at any rate a workaround, but insofar
++// as I am unlikely to relearn Poppler's latest pattern of memory allocation
++// with each version0.* Poppler release, especially while Poppler's
++// private API (understandably) remains so little documented, why, such a
++// construct does what the present program needs it
++// to do. THB, Jan. 2019
++//
++//
+
+ namespace {
+
+@@ 33,11 +69,9 @@ namespace {
+ ) {
+ const int size = src>getLength();
+ for ( int i = 0; i < size; ++i ) {
+ char *const key = src>getKey(i);
++ const char *const key = src>getKey(i);
+ if ( keys.count(key) ) continue;
+ Object obj;
+ src >getValNF( i , &obj );
+ dest>add ( key, &obj );
++ dest>add( key, Object( src>getValNF( i ) ) );
+ }
+ return size;
+ }
+@@ 54,65 +88,33 @@ string PDF::update_catalog(
+ // To understand this code, refer to the Libpoppler headers and
+ // to Adobe's PDF Reference 1.7, sect. 8.3.1.
+
+ Object catalog_obj;
+ catalog_obj.initDict(static_cast(0));
+ Dict *catalog = catalog_obj.getDict();
++ Dict &catalog = *( new Dict ( static_cast(0) ) );
+ {
+ set keys;
+ { char s[] = "PageLabels"; keys.insert(s); }
+ { char s[] = "Outlines" ; keys.insert(s); }
+ copy_but( catalog, rep>catalog, keys );
++ copy_but( &catalog, rep>catalog, keys );
+ }
+
+ Object dict_Roman_obj;
+ char s_Roman[] = "S";
+ {
+ dict_Roman_obj.initDict(static_cast(0));
+ Dict *const dict_Roman = dict_Roman_obj.getDict();
+ Object name_Roman;
+ { char s[] = "r"; name_Roman.initName(s); }
+ dict_Roman>add( s_Roman, &name_Roman );
+ }
++ Dict &dict_Roman = *( new Dict( static_cast(0) ) );
++ dict_Roman.add( "S", Object( objName, "r" ) );
+
+ Object dict_Arabic_obj;
+ char s_Arabic[] = "S";
+ {
+ dict_Arabic_obj.initDict(static_cast(0));
+ Dict *const dict_Arabic = dict_Arabic_obj.getDict();
+ Object name_Arabic;
+ { char s[] = "D"; name_Arabic.initName(s); }
+ dict_Arabic>add( s_Arabic, &name_Arabic );
+ }
++ Dict &dict_Arabic = *( new Dict( ( static_cast(0) ) ) );
++ dict_Arabic.add( "S", Object( objName, "D" ) );
+
+ Object array_obj;
+ {
+ Object zero;
+ zero.initInt( 0 );
+ Object n_page;
+ n_page.initInt( nog.count_prefatory_page() );
+ array_obj.initArray(static_cast(0));
+ Array *const array = array_obj.getArray();
+ array>add( &zero );
+ array>add( &dict_Roman_obj );
+ array>add( &n_page );
+ array>add( &dict_Arabic_obj );
+ }
++ Array &array = *( new Array( static_cast(0) ) );
++ array.add( Object( 0 ) );
++ array.add( Object( &dict_Roman ) );
++ array.add( Object( nog.count_prefatory_page() ) );
++ array.add( Object( &dict_Arabic ) );
+
+ Object dict_obj;
+ char s_Nums[] = "Nums";
+ {
+ dict_obj.initDict(static_cast(0));
+ Dict *const dict = dict_obj.getDict();
+ dict>add( s_Nums, &array_obj );
+ }
++ Dict &dict = *( new Dict( static_cast(0) ) );
++ dict.add( "Nums", Object( &array ) );
+
+ Object ref_obj;
+ ref_obj.initRef( n_obj(pdf), 0 );
++ Object ref( n_obj(pdf), 0 );
+
+ char s_PageLabels[] = "PageLabels";
+ char s_Outlines [] = "Outlines" ;
+ catalog>add( s_PageLabels, &dict_obj );
+ catalog>add( s_Outlines , &ref_obj );
++ catalog.add( "PageLabels", Object(&dict) );
++ catalog.add( "Outlines" , move(ref) );
+
+ string res;
+ {
+@@ 125,6 +127,7 @@ string PDF::update_catalog(
+ }
+
+ // Do print() to a string rather than to stdout or a file.
++ Object catalog_obj( &catalog );
+ {
+ int fd[2];
+ pipe(fd);
+@@ 154,25 +157,19 @@ string PDF::add_title_to_info(
+
+ PDF_rep *const rep = pdf.get_PDF_rep(magic);
+
+ Object info_obj;
+ info_obj.initDict(static_cast(0));
+ Dict *info = info_obj.getDict();
++ Dict &info = *( new Dict( static_cast(0) ) );
+ {
+ set keys;
+ copy_but( info, rep>info, keys );
++ copy_but( &info, rep>info, keys );
+ }
+
+ char s_Title[] = "Title";
++ GooString &gs = *( new GooString );
++ const char s_Title[] = "Title";
+ {
+ Object obj_old;
+ info>lookup( s_Title, &obj_old );
++ Object &obj_old = *( new Object( info.lookup( s_Title, 0 ) ) );
+ if ( obj_old.isNull() ) {
+ Object obj_new;
+ {
+ GooString &gs = *( new GooString( title.c_str() ) );
+ obj_new.initString( &gs );
+ }
+ info>add( s_Title, &obj_new );
++ gs.Set( title.c_str() );
++ info.add( s_Title, Object( &gs ) );
+ }
+ }
+
+@@ 187,6 +184,7 @@ string PDF::add_title_to_info(
+ }
+
+ // Do print() to a string rather than to stdout or a file.
++ Object info_obj( &info );
+ {
+ int fd[2];
+ pipe(fd);
+@@ 221,34 +219,22 @@ string PDF::update_trailer(
+ // To understand this code, refer to the Libpoppler headers and
+ // to Adobe's PDF Reference 1.7, sect. 3.4.4.
+
+ Object new_trailer_obj;
+ new_trailer_obj.initDict(static_cast(0));
+ Dict *new_trailer = new_trailer_obj.getDict();
++ Dict &new_trailer = *( new Dict( static_cast(0) ) );
+ {
+ set keys;
+ { char s[] = "Size"; keys.insert(s); }
+ { char s[] = "Prev"; keys.insert(s); }
+ { char s[] = "ID" ; keys.insert(s); }
+ copy_but( new_trailer, rep>trailer, keys );
+ }
+
+ char s_Size[] = "Size";
+ {
+ Object obj;
+ obj.initInt( n_pdf_obj );
+ new_trailer>add( s_Size, &obj );
++ copy_but( &new_trailer, rep>trailer, keys );
+ }
+
+ char s_Prev[] = "Prev";
+ {
+ Object obj;
+ obj.initInt( offset_last_xref_table(pdf_filename) );
+ new_trailer>add( s_Prev, &obj );
+ }
++ new_trailer.add( "Size", Object( n_pdf_obj ) );
++ new_trailer.add( "Prev", Object( offset_last_xref_table(pdf_filename) ) );
+
+ string res = "trailer\n";
+
+ // Do print() to a string rather than to stdout or a file.
++ Object new_trailer_obj( &new_trailer );
+ {
+ int fd[2];
+ pipe(fd);
+ derivations0.56.20180123.1.orig/btool/filltocends
++++ derivations0.56.20180123.1/btool/filltocends
+@@ 25,7 +25,7 @@ function toc_line {
+ }
+
+ toc_line Contents
+cat $OLDTOC
++sed re 's/%?[[:space:]]*$//' $OLDTOC
+ toc_line Bibliography
+ toc_line Index
+
diff pruN 0.53.201204142/debian/patches/poppler_api_change 0.56.20180123.12/debian/patches/poppler_api_change
 0.53.201204142/debian/patches/poppler_api_change 20120218 17:48:01.000000000 +0000
+++ 0.56.20180123.12/debian/patches/poppler_api_change 19700101 00:00:00.000000000 +0000
@@ 1,13 +0,0 @@
Index: derivations0.52.20100310/btool/PDF/PDF.cc
===================================================================
 derivations0.52.20100310.orig/btool/PDF/PDF.cc 20100309 22:32:55.000000000 +0100
+++ derivations0.52.20100310/btool/PDF/PDF.cc 20120218 15:13:23.150121718 +0100
@@ 8,7 +8,7 @@
 }

 int PDF::offset_last_xref_table( const PDF &pdf ) {
 return pdf.rep>xref>getLastXRefPos();
+ return pdf.rep>xref>getEntry(0)>offset;
 }

 PDF::Iref PDF::iref_catalog( const PDF &pdf ) {
diff pruN 0.53.201204142/debian/patches/series 0.56.20180123.12/debian/patches/series
 0.53.201204142/debian/patches/series 20150531 22:40:34.000000000 +0000
+++ 0.56.20180123.12/debian/patches/series 20190105 01:00:00.000000000 +0000
@@ 1,5 +1 @@
build_fixes
upstream_helper
poppler_api_change
cxxflags
fillpopplersxreftablegap
+fromstretchtowardbuster
diff pruN 0.53.201204142/debian/patches/upstream_helper 0.56.20180123.12/debian/patches/upstream_helper
 0.53.201204142/debian/patches/upstream_helper 20120218 10:38:41.000000000 +0000
+++ 0.56.20180123.12/debian/patches/upstream_helper 19700101 00:00:00.000000000 +0000
@@ 1,2358 +0,0 @@
Description: development helper
 These are meant to help Derivations development, but is not needed
 to use it in any way. Read helper/README to know more about it.
Author: Marc Dequènes (Duck)


The information above should follow the Patch Tagging Guidelines, please
checkout http://dep.debian.net/deps/dep3/ to learn about the format. Here
are templates for supplementary fields that you might want to add:

Origin: ,
Bug:
BugDebian: http://bugs.debian.org/
BugUbuntu: https://launchpad.net/bugs/
Forwarded:
ReviewedBy:
LastUpdate:

 /dev/null
+++ derivations0.52.20100310/helper/RelSteps
@@ 0,0 +1,164 @@
+
+
+
+DEVELOPMENT AND RELEASE STEPS
+
+
+
+Although one need not develop Derivations in a Debian GNU/Linux
+environment, the authorwho happens to be a Debian user and
+developerdoes so. This file documents some of the author's
+development practices in that context. (The file does not carefully
+distinguish between actions the author takes as upstream developer and
+actions he takes as Debian developer, though the two roles are indeed
+logically distinct.)
+
+In all the file,
+
+ * $D1 represents the top development directory;
+ for example, ~/der/derivations0.5.20070322.
+ * $DW represents the top development directory before it has been
+ assigned an actual revision date (see $TW below).
+ * $DL represents the top development directory of the last revision.
+ * $DP represents $D1/.., $DW/.. and $DL/.., the parent
+ of the top development directory, as ~/der.
+ * $T1 represents the revision date; for example 20070322.
+ * $TW represents the working revision date, inasmuch as the actual
+ revision date normally is not known until release time. (For this
+ date, the author usually but not always chooses the next day not a
+ Sunday following the actual date of the last revision.)
+ * $DSC represents the Debian .dsc file, where the needed Debian source
+ package files in $DP are as
+
+ derivations_0.5.20070322.orig.tar.gz
+ derivations_0.5.200703221.diff.gz
+ derivations_0.5.200703221.dsc
+
+ * $DIFFGZ and $ORIGTARGZ represent the .diff.gz and .orig.tar.gz
+ as above.
+ * $DEB represents the Debian binary package,
+ as derivations_0.5.200703221_all.deb.
+
+The file naturally is not a complete set of instructions, being rather
+notes to jog the author's memory. For example, the file does not
+explain what the command "view foo" is for, though of course it is for
+the author to review foo to ensure that all is in order, making
+corrections if necessary. However, there is probably enough information
+here to be of use to a future developer.
+
+
+BEGINNING DEVELOPMENT OF A NEW REVISION
+
+
+The author opens development of a new revision of the book by
+approximately the following steps.
+
+1. Optionally, or if $DL does not exist,
+
+ cd $DP
+ rm R $DL  true
+ dpkgsource x $DSC
+ cd $DL
+ bash helper/letexec
+ cd ..
+
+2. Give the commands
+
+ cd $DL
+ fakeroot debian/rules clean
+ cd ..
+ cp ai $DL $DW
+ cd $DW
+ helper/extendchangelog
+ helper/updatedate $TW
+
+
+RELEASING
+
+
+The author closes development of a new revision of the book and prepares
+it for release by approximately the following steps.
+
+1. Decide on an official revision date, then
+
+ cd $DP
+ mv i $DW $D1
+ cd $D1
+ helper/updatedate $T1
+ fakeroot debian/rules clean
+ helper/buffesrc
+
+2. Repeatedly give the following commands, making corrections as
+indicated, until no further corrections remain to be made:
+
+ helper/ispellall
+ make clean tex/check  less
+ fakeroot debian/rules clean
+ helper/buffesrc
+
+During the "make tex/check", index entries can overfill their margins
+by any amount less than 9.0pt. Where necessary (hopefully not often),
+any page can be less than 2.0pt too tall. (These limits may be too
+strait, but experience has not yet asked the author to judge looser
+limits; so, these are the limits to enforce for now.)
+
+3. Finalize doc/changelog and debian/changelog with the following
+commands. Change the distribution in debian/changelog from experimental
+to unstable if appropriate.
+
+ cd ..
+ diff ruN $DL $D1 >diff
+ grep B3 A3 '19\(69\70\)' diff
+ view diff
+ cd $D1
+ vim doc/changelog
+ vim debian/changelog
+ fakeroot debian/rules clean
+ helper/buffesrc
+ cd ..
+ diff ruN $DL $D1 >diff
+ grep B3 A3 '19\(69\70\)' diff
+ cd $D1
+
+4. Optionally,
+
+ fakeroot debian/rules cleandebforce
+
+5. Because the author actually uses the Debianized source to develop,
+create a nonDebianized upstream source by
+
+ fakeroot debian/rules orig
+
+6. Omitting the uc and us if it is wanted actually digitally to sign
+the sources, build source and binary package files by
+
+ dpkgbuildpackage rfakeroot uc us
+ fakeroot debian/rules clean
+ cd ..
+
+7. Optionally review the .diff.gz to ensure that it actually contains
+the Debianization as it ought.
+
+8. Give the command
+
+ sudo dpkg i $DEB
+
+9. Check that it has seemed to install correctly.
+
+10. Optionally upload.
+
+
+
+For lack of a better place to note it, it is here noted that the author
+might check the following on completing a chapter:
+
+ * /\~\$ (short math)
+ * /\(^\[[:space:]]\)\$ (long math)
+ * /\$\('s\th\)\([^az]\$\) (no tied *'s or *th)
+ * /}[,.?;:!] (\emph{text[,.?;:!]})
+ * /\\\\ (no \\ at the end of an array or eqnarray)
+ * /\(^\[[:space:]]\)(\\ref{ (no untied eqn references)
+ * /^[[:space:]]*\\emph (no \emph inadvertently in place of \index)
+ * /\\index.*\![[:space:]] (no space following `!' in \index)
+ * /bad break (bad breaks are in fact bad)
+
 /dev/null
+++ derivations0.52.20100310/helper/buffesrc
@@ 0,0 +1,16 @@
+#! /bin/bash e
+
+# (See the notes in `./buffexpand'.)
+#
+# This script is successfully called from the main source directory as
+# `helper/buffesrc'. It buffs and expands all the source files, except
+# that it only buffs the two Makefiles `Makefile' and `debian/rules'.
+
+BUFFE='helper/buffexpand M'
+for F in `find .` ; do
+ if [ f $F ] ; then
+ $BUFFE $F
+ echo $F
+ fi
+done
+
 /dev/null
+++ derivations0.52.20100310/helper/makereadme
@@ 0,0 +1,271 @@
+#! /usr/bin/perl
+use warnings;
+use strict;
+use integer;
+use FindBin;
+use lib $FindBin::RealBin;
+use Def;
+
+# This script generates an appropriate README from the following listed
+# sections of the manpage. It clobbers the existing README.
+#
+# (This highly specialized helper script is perhaps the result of
+# overenthusiasm. It automates an otherwise slightly annoying
+# packagemaintenance task, but in retrospect it is not clear that the
+# effort spent in writing the script justifies the gain. Nevertheless,
+# I like the script. Here it is. THB)
+#
+# (By the way, I thought about extending the script to autogenerate the
+# long description in debian/control. However, overenthusiasm has
+# bounds. The long description is twenty times as important as the
+# README. It merits manual crafting. But maybe we should generate the
+# manpage "SUMMARY" section from the long description? No, not
+# today. THB)
+#
+# The Makefile and debian/rules probably should not invoke this script.
+# Probably only the developer should invoke it, manually, if and when he
+# wants to.
+#
+# As a developer, you do not need to use this script. You can write
+# your own README if you want to. The only reason the script exists is
+# that the author couldn't really think of anything at the moment to
+# write in the README which wasn't already in the manpage, but if you
+# can think of something else to write there, go right ahead. However,
+# if you do use this script and if you modify it, note the "Make special
+# corrections" block below.
+#
+# One possible use of this script is to autogenerate a candidate README
+# which you then manually edit.
+
+# Relevant manpage sections.
+our @sh = (
+ 'DESCRIPTION',
+ 'READING THE BOOK',
+ 'AUTHOR',
+);
+
+our $marker = "\001";
+our $headlead_trad = 'The Debian Package';
+our $mark_lic = qr/^Copyright\s+\(C\)/;
+our $time_dflt = '00:00:00 +0000';
+our $cmd_date = 'date uRd';
+our $cmd_fmt = "fmt w${Def::width} u";
+our $cmd_tempfile = 'tempfile';
+
+my $manpage = "${FindBin::RealBin}/../doc/${Def::out}.${Def::mansect}";
+my $deb_cprt = "${FindBin::RealBin}/../debian/copyright";
+my $readme = "${FindBin::RealBin}/../${Def::name_readme}";
+my $bar = '' x ${Def::width} ."\n";
+my $mp_date;
+my $mp_author;
+my $mp_title;
+
+# Subroutine: splice lines ending in backslashnewline.
+sub splice_lines (;\@) {
+ local $_ = @_ ? shift : \$_;
+ for my $i ( reverse 0 .. $#$_ ) {
+ chomp $_>[$i];
+ next unless $_>[$i] =~ /\\$/;
+ chop $_>[$i];
+ splice @$_, $i, 2, $_>[$i] . $_>[$i+1] . "\n" if $i < $#$_;
+ }
+ $_ .= "\n" for @$_;
+ return $_;
+}
+
+our @escape_save = ();
+# Subroutines: recognize, convert, save and restore escaped characters.
+sub escape (;\$) {
+ local $_ = @_ ? shift : \$_;
+ @escape_save = ();
+ $$_ =~ /$marker/ and die "$0: marker character is reserved\n";
+ my $ends_newline = $$_ =~ /\n\z/;
+ chomp $$_;
+ {
+ my $i;
+ while ( ( $i = index $$_, '\\' ) >= 0 ) {
+ substr( $$_, $i, 5 ) =~ /^\\\*\(/
+ and push( @escape_save, substr( $$_, $i, 5, $marker ) ), next;
+ substr( $$_, $i, 4 ) =~ /^\\\(/
+ and push( @escape_save, substr( $$_, $i, 4, $marker ) ), next;
+ push( @escape_save, substr( $$_, $i, 2, $marker ) );
+ }
+ }
+ $$_ .= "\n" if $ends_newline;
+ return $$_;
+}
+sub convescape () {
+ for ( @escape_save ) {
+ $_ =~ /^\\&$/ and $_ = '' , next;
+ $_ =~ /^\\$/ and $_ = '' , next;
+ $_ =~ /^\\\(em$/i and $_ = '', next;
+ $_ =~ /^\\\*\(lq$/i and $_ = '"' , next;
+ $_ =~ /^\\\*\(rq$/i and $_ = '"' , next;
+ }
+}
+sub unescape (;\$) {
+ local $_ = @_ ? shift : \$_;
+ while ( @escape_save ) {
+ my $c = shift @escape_save;
+ $$_ =~ s/$marker/$c/;
+ }
+ @escape_save = ();
+ return $$_;
+}
+sub convall (;\$) {
+ local $_ = @_ ? shift : \$_;
+ defined $$_ or return $$_;
+ escape $$_;
+ convescape ;
+ unescape $$_;
+ return $$_;
+}
+
+# Subroutine: dequote a quoted string.
+sub dequote (;\$) {
+ local $_ = @_ ? shift : \$_;
+ chomp $$_;
+ escape $$_;
+ $$_ =~ s/^\s*"([^"]*?)"\s*$/$1/;
+ unescape $$_;
+ return $$_;
+}
+
+# Subroutine: collapse an alternating emphasizor.
+sub collapse (;\$) {
+ local $_ = @_ ? shift : \$_;
+ chomp $$_;
+ escape $$_;
+ my @w = $$_ =~ /"[^"]*?"[^"\s]+/g;
+ dequote for @w;
+ $$_ = join( '', @w );
+ unescape $$_;
+ return $$_;
+}
+
+# Subroutine: format text to a maximum width.
+sub format_text (@) {
+ my $file = `$cmd_tempfile`; chomp $file;
+ open FILE, '>', $file;
+ print FILE @_;
+ close FILE;
+ my @ret = `$cmd_fmt $file`;
+ unlink $file;
+ return @ret;
+}
+
+# Read the manpage in.
+my @man;
+open MAN, '<', $manpage;
+ @man = ;
+close MAN;
+splice_lines @man;
+
+# Parse the manpage.
+my %sect;
+{
+ my $sh;
+ my $text = [];
+ for ( @man ) {
+ next unless /\S/;
+ my( $cmd, $arg ) = /^\.(\S+)(?:\s+(\S(?:.*?\S)??))??\s*$/;
+ if ( defined $cmd ) {
+ if ( $cmd =~ /^(?:BI)$/i ) {
+ dequote $arg;
+ $_ = "$arg\n";
+ $cmd = undef;
+ }
+ elsif ( $cmd =~ /^(?:BRRBIRRIBIIB)$/i ) {
+ collapse $arg;
+ $_ = "$arg\n";
+ $cmd = undef;
+ }
+ elsif ( $cmd =~ /^TH$/i ) {
+ ( $mp_date, $mp_author, $mp_title ) = $arg =~
+ /^.*"([^()"]*?)"\s*"([^()"]*?)"\s*"([^()"]*?)"\s*$/
+ or die "$0: cannot parse .TH line";
+ }
+ elsif ( $cmd =~ /^SH$/i ) {
+ $sect{$sh} = $text if defined $sh;
+ $text = [];
+ $sh = $arg;
+ dequote $sh;
+ }
+ elsif ( $cmd =~ /^PP$/i ) {
+ $_ = undef;
+ $cmd = undef;
+ }
+ # (Ignore lines beginning with other commands.)
+ }
+ push @$text, $_ unless defined $cmd;
+ }
+ $sect{$sh} = $text if defined $sh;
+ $text = undef;
+ $sh = undef;
+}
+
+# If debian/copyright exists, pull licensing text from it.
+my @lic;
+if ( e $deb_cprt ) {
+ my @lic0;
+ open CPRT, '<', $deb_cprt;
+ {
+ my $in = '';
+ while ( ) {
+ $in = '1' if /$mark_lic/;
+ $in or next;
+ push @lic0, $_;
+ }
+ }
+ close CPRT;
+ @lic = format_text @lic0;
+ unshift @lic, $bar, "\n";
+ push @lic, "\n";
+}
+
+# Calculate the manpage date, then prepare the readme's header and
+# footer.
+my $date = `$cmd_date '$mp_date $time_dflt'`; chomp $date;
+my @head = (
+ $Def::traditional_readme
+ ? $headlead_trad . " ${Def::out}\n"
+ : ${Def::full_title} . "\n"
+);
+my @foot = ( "${Def::author} <${Def::email}>\n", "$date\n" );
+if ( $Def::traditional_readme ) {
+ push @head, '' x (length($head[0])1) . "\n";
+}
+else {
+ unshift @head, $bar;
+ unshift @head, "\n";
+ push @head, $bar;
+ unshift @foot, $bar;
+ push @foot, "\n";
+}
+push @head, "\n";
+
+# Make special corrections.
+if ( defined $sect{'AUTHOR'} ) {
+ for ( @{ $sect{'AUTHOR'} } ) {
+ next if s/^(The book) (and this manpage are\b)/$1 is/;
+ next if s/^(${Def::out})$/'$1'/;
+ next if s/^(in which) (they are) (distributed.)/$1 the book is $3/;
+ }
+}
+
+# Build the readme.
+my @body0;
+for my $sh ( @sh ) {
+ defined $sect{$sh} or next;
+ convall for @{ $sect{$sh} };
+ push @body0, map { defined() ? $_ : "\n" } @{ $sect{$sh} };
+ push @body0, "\n";
+}
+my @body = format_text @body0;
+my @readme = ( @head, @body, @lic, @foot );
+
+# Write the readme out.
+open README, '>', $readme;
+ print README @readme;
+close README;
+
 /dev/null
+++ derivations0.52.20100310/helper/updatedebver
@@ 0,0 +1,61 @@
+#! /usr/bin/perl
+use warnings;
+use strict;
+use integer;
+use FindBin;
+use lib $FindBin::RealBin;
+use Def;
+
+# This script adds or updates the Debian package version number at the
+# head of debian/changelog.
+
+our $usage = <;
+close CL;
+
+# Update the changelog's first line.
+$cld[0] =~
+ s/^(${Def::out} \(${Def::basever}\.\d+)((?:\d+)?)(\))/$1$vdstr$3/
+ or die "$0: can't parse the first line of debian/changelog\n";
+
+# Write the changelog out.
+open CL, '>', $changelog_deb;
+ print CL @cld;
+close CL;
+
 /dev/null
+++ derivations0.52.20100310/helper/FilesTex
@@ 0,0 +1,26 @@
+main
+sphere
+pref
+intro
+alggeo
+trig
+drvtv
+cexp
+noth
+integ
+taylor
+inttx
+cubic
+matrix
+gjrank
+mtxinv
+eigen
+vector
+vcalc
+fours
+fouri
+stub
+hex
+greek
+purec
+hist
 /dev/null
+++ derivations0.52.20100310/helper/Def.pm
@@ 0,0 +1,24 @@
+use warnings;
+use strict;
+package Def;
+
+# This file defines common parameters for some of the helper scripts.
+
+our $out = 'derivations';
+our $full_title = 'Derivations of Applied Mathematics';
+our $author = 'Thaddeus H. Black';
+our $email = 'thb@derivations.org';
+our $email_deb = 'thb@debian.org';
+our $width = 72;
+our $basever = '0.52';
+our $main0 = 'main';
+our $bib0 = 'bib';
+our $dist = 'experimental';
+our $urgency = 'low';
+our $mansect = '7';
+our $name_readme = 'README';
+our $traditional_readme = 0;
+our $cl_entry_dflt = '[Add changelog entries here.]';
+
+1;
+
 /dev/null
+++ derivations0.52.20100310/helper/updatedate
@@ 0,0 +1,218 @@
+#! /usr/bin/perl
+use warnings;
+use strict;
+use integer;
+use FindBin;
+use lib $FindBin::RealBin;
+use Def;
+
+# Before running this script, you should add a changelog entry (even an
+# empty one) for the current version, if you've not yet done so. The
+# script modifies whichever entry happens to be at the changelog's top.
+#
+# This helper script automatically updates the package's issue date in
+# several files (the files are named in the following block). The
+# script accepts date and time in any format the date(1) command
+# understands, but it writes the date (and time) to each file in that
+# file's appropriate format. Example usage:
+#
+# $ updatedate '24 Jan 2005 21:00 +0000'
+#
+# If no time of day is given, it defaults to 00:00:00 +0000.
+#
+# When also running the helper/updatever script, you probably want to
+# run that script first, then this one. Otherwise debian/changelog is
+# updated in a different manner than you probably thought to update it.
+#
+# Note that the script does not change the name of the toplevel source
+# directory. This is for sanity's sake. You must change the name of
+# the directory yourself.
+#
+# Note also that the self entry in bib.bib, where the book refers to
+# itself, must appear first to run this script. If something else
+# appears first, the script may happily try to update that date,
+# instead.
+
+our $debian = "${FindBin::RealBin}/../debian" ;
+our $maintex = "${FindBin::RealBin}/../tex/${Def::main0}.tex";
+our $mainbib = "${FindBin::RealBin}/../tex/${Def::bib0}.bib" ;
+our $manpage =
+ "${FindBin::RealBin}/../doc/${Def::out}.${Def::mansect}" ;
+our $readme = "${FindBin::RealBin}/../${Def::name_readme}" ;
+our $changelog = "${FindBin::RealBin}/../doc/changelog" ;
+our $changelog_deb = "${debian}/changelog" ;
+our $copyright = "${debian}/copyright" ;
+
+my $warn_msg = "$0: cannot find date line in ";
+
+our $usage = <;
+ @bib = ;
+ @man = ;
+ @rme = ;
+ @cr = ;
+ close MAIN;
+ close BIB ;
+ close MAN ;
+ close RME ;
+ close CR ;
+}
+if ( $do_cl ) {
+ open CL , '<', $changelog ;
+ @cl = ;
+ close CL ;
+}
+if ( $do_deb ) {
+ open CLD , '<', $changelog_deb;
+ @cld = ;
+ close CLD ;
+}
+
+if ( $do_main ) {
+
+ # Update the dates in the main TeX source file.
+ $_ = 0; ++$_ until $_ > $#main 
+ $main[$_] =~
+ s/^(\\newcommand\{\\verdate\}\{)([^\{\}]+)(\})\s*?$/$1$date$3/;
+ $_ <= $#main or warn "$warn_msg$maintex\n";
+ $_ = 0; ++$_ until $_ > $#main 
+ $main[$_] =~ s/^(Copyright \\copyright\\ \d+)(\d+)/$1$year/;
+ $_ <= $#main or warn "$warn_msg$maintex\n";
+
+ # Update the date in the bibliography's (leading) selfentry.
+ $_ = 0; ++$_ until $_ > $#bib 
+ $bib[$_] =~ s/^(\s*year=\{)([^\{\}]+)(\},)\s*?$/$1$date$3/;
+ $_ <= $#bib or warn "$warn_msg$mainbib\n";
+
+ # Update the dates on the manpage.
+ $man[0] =~ s/(")([^"]*?)("\s*\\)$/$1$date$3/
+ or warn "$warn_msg$manpage\n";
+ $_ = $#man; $_ until $_ < 0 
+ $man[$_] =~ s/^(Copyright \(C\) \d+\\)(\d+)/$1$year/;
+ $_ >= 0 or warn "$warn_msg$manpage\n";
+
+ # Update the date at the foot of the readme and the copyright year
+ # therein.
+ $_ = $#rme; $_ until $_ < $#rme1 
+ # $rme[$_] =~ s/^(.*, )([^,\n]*,[^,\n]*)$/$1$cld/;
+ $rme[$_] =~ s/^.*\S.*$/$cld/;
+ $_ >= $#rme1 or warn "$warn_msg$readme\n";
+ $_; $_ until $_ < 0 
+ $rme[$_] =~ s/^(Copyright \(C\) \d+)(\d+)/$1$year/;
+ $_ >= 0 or warn "$warn_msg$readme\n";
+
+ # Update the year in the copyright file.
+ $_ = 0; ++$_ until $_ > $#cr 
+ $cr[$_] =~ s/^(Copyright \(C\) \d+)(\d+)/$1$year/;
+ $_ <= $#cr or warn "$warn_msg$copyright\n";
+
+}
+
+if ( $do_cl ) {
+
+ # Update the dates in the main changelog.
+ unless ( $opt{s} ) {
+ $cl[0] =~ s/(\([^()]*\.)(\d+)([^().]*?\))/$1$verd$3/
+ or warn "$warn_msg$changelog\n";
+ }
+ {
+ my $authe = "${Def::author} <${Def::email}>";
+ $_ = 0; ++$_ until $_ > $#cl 
+ $cl[$_] =~ s/^(  $authe )(\S(?:.*?\S)??)\s*$/$1$cld\n/;
+ $_ <= $#cl or warn "$warn_msg$changelog\n";
+ }
+
+}
+
+if ( $do_deb ) {
+
+ # Update the dates in the debian/changelog.
+ unless ( $opt{s} ) {
+ # Update the version date atop debian/changelog.
+ $cld[0] =~ s/(\([^()]*\.)(\d+)([^().]*?\))/$1$verd$3/
+ or warn "$warn_msg$changelog_deb\n";
+ }
+ {
+ my $authe = "${Def::author} <${Def::email_deb}>";
+ $_ = 0; ++$_ until $_ > $#cld 
+ $cld[$_] =~ s/^(  $authe )(\S(?:.*?\S)??)\s*$/$1$cld\n/;
+ $_ <= $#cld or warn "$warn_msg$changelog_deb\n";
+ }
+
+}
+
+# Write updated files out.
+if ( $do_main ) {
+ open MAIN, '>', $maintex ;
+ open BIB , '>', $mainbib ;
+ open MAN , '>', $manpage ;
+ open RME , '>', $readme ;
+ open CR , '>', $copyright ;
+ print MAIN @main;
+ print BIB @bib ;
+ print MAN @man ;
+ print RME @rme ;
+ print CR @cr ;
+ close MAIN;
+ close BIB ;
+ close MAN ;
+ close RME ;
+ close CR ;
+}
+if ( $do_cl ) {
+ open CL , '>', $changelog ;
+ print CL @cl ;
+ close CL ;
+}
+if ( $do_deb ) {
+ open CLD , '>', $changelog_deb;
+ print CLD @cld ;
+ close CLD ;
+}
+
 /dev/null
+++ derivations0.52.20100310/helper/buffexpand
@@ 0,0 +1,121 @@
+#! /usr/bin/perl
+
+# This Perl script buffs (removes trailing blanks from) and/or expands
+# (converts tabs to spaces in) the files named on the command line. It
+# also ensures that each file (if not empty) ends in a proper "\n".
+
+use warnings;
+use strict;
+use integer;
+
+our $tab = 8;
+our $pat_makefile_name = qr/Makefile(?:[.].*)?rules/;
+our $usage = <;
+ close FILE;
+ my $has_acted = 0;
+ my @buff_expanded = buff_expand $is_makefile, $has_acted, @line;
+ if ( $stdout ) {
+ print @buff_expanded;
+ }
+ elsif ( $has_acted ) {
+ open FILE, '>', $file
+ or warn( "$0: cannot write $file\n" ), next;
+ print FILE @buff_expanded;
+ close FILE;
+ }
+ }
+}
+else {
+ my @line = <>;
+ my $has_acted = 0;
+ print buff_expand 0, $has_acted, @line;
+}
+
 /dev/null
+++ derivations0.52.20100310/helper/ispellall
@@ 0,0 +1,13 @@
+#! /bin/bash e
+
+# Run this to check all appropriate source files with ispell, using (and
+# adding to) the local development dictionary.
+
+D=$( dirname $0 )
+ispell x p $D/dict \
+ $D/../tex/*.{tex,bib} \
+ $D/../doc/* \
+ $D/../debian/{changelog,control,copyright} \
+ $D/../helper/RelSteps \
+ $( find $D/.. name README )
+
 /dev/null
+++ derivations0.52.20100310/helper/vimmacros
@@ 0,0 +1,6 @@
+map :w:!make derivations.ps
+imap ~(\ref{})
+map Oa \begin{split}\end{split}^
+se tw=72
+se ic
+se et
 /dev/null
+++ derivations0.52.20100310/helper/README
@@ 0,0 +1,35 @@
+
+The files in this directory are development helpers
+only, probably of interest to you only if you are
+developing the package. You do not need these files to
+build, install or use the package, nor strictly do you
+need them even to modify the package. In fact you could
+delete the entire directory without ill effect. If
+modifying the package, however, you may optionally find
+some of the scripts here convenient.
+
+Several of the files are scripts. You can "bash
+letexec" to make these executable.
+
+The file helper/dict is a local dictionary for ispell.
+You can use it with "ispell -x -p helper/dict
+tex/*.{tex,bib}", for example.
+
+The file helper/vimmacros contains nothing but some macros the
+author likes to use when editing the book with "vim -S
+helper/vimmacros tex/foo.tex". It is supposed that
+most people probably prefer making their own little
+editor macros (for their own favorite editors); so,
+unless you like using other people's editor macros for
+some reason, you can ignore the file.
+
+Because these files are just development helpers, they
+are only minimally documented. The comments at or near
+the head of the file explain what each file is and what
+it is for. Refer to the various files for further
+information.
+
+If this helper/ directory interests you, then so
+probably also will the script debian/mkorig and
+the 'orig' and 'origforce' targets in debian/rules.
+
 /dev/null
+++ derivations0.52.20100310/helper/dict
@@ 0,0 +1,1069 @@
+Aa
+ab
+aB
+ABCDEF
+Abramowitz
+absurdum
+ac
+aC
+accrete
+accretes
+adecimal
+Adel
+adjointed
+adjoints
+ae
+af
+agn
+ahlen
+AHNT
+ai
+al
+alebox
+algebraist
+algebraists
+alggeo
+algorithmizes
+aliceinfo
+alphabetization
+alsj
+AMD
+amenably
+amsmath
+AMSmath
+amssymb
+analyses
+Andr
+Andreas
+andrews
+antider
+antiderivative
+antiderivative's
+antiderivatives
+AP
+approximator
+apxe
+apxex
+aQ
+arccosh
+arccosine
+arcsinh
+arctanh
+Arfken
+arg
+Argand
+arounds
+artillerist's
+astronautical
+Augustin
+Automake
+automatable
+az
+bA
+backportability's
+backporting
+bAI
+Bal
+Balachandran
+Balakumar
+Balanis
+Ballantine
+Banos
+baroquity
+basever
+Bb
+Bbb
+BC
+Beattie
+Bernhard
+bh
+BibTeX
+Bienia
+binary's
+binth
+binthe
+BJam
+Bjarne
+bl
+Blacksburg
+Bladel
+Blaise
+bly
+Boca
+boundedness
+BQ
+breview
+BSL
+bT
+bTI
+btool
+BU
+buffe
+buildpackage
+Burlington
+businesspeople
+businessperson
+BW
+BWH
+byu
+ca
+CAC
+cade
+cally
+Cardan
+Cardano
+Cardano's
+Cardanus
+cauchy
+cauchyf
+cauchyn
+cc
+ccc
+ccccc
+ccccccc
+ccccccccc
+ccccccccccccc
+cd
+Ce
+cern
+cexp
+ch
+changelog
+changelogs
+Cholesky
+Chs
+ci
+cidfmap
+cis
+cit
+citable
+citational
+Civita
+Civita's
+cleandeb
+cleanless
+cleardoublepage
+cls
+co
+cois
+Colchester
+columnwise
+com
+commutivity
+complementarily
+composable
+congener
+congeners
+conj
+const
+conventionalize
+convolutional
+coords
+COPYLEFT
+correlational
+cos
+cosh
+cosint
+countability
+counterchallenged
+Courant
+Courant's
+couth
+cp
+Cramer's
+CRC
+cred
+crrrrrrrrrc
+CT
+curl's
+CX
+CXLVII
+cyc
+cyl
+dcurl
+DCXCIII
+Dd
+ddiv
+ddsf
+ddvf
+de
+Debconf
+debhelper
+debian
+Debian's
+Debianization
+Debianized
+Debianizing
+debver
+def
+deferral
+defz
+del
+delp
+deltatr
+Demystified
+der
+derivational
+deSturler
+det
+dev
+dexp
+df
+dfsg
+dg
+dh
+di
+diag
+diagn
+diagonal's
+diagonalizability
+diagonalization
+diagonalize
+diagonalized
+dict
+diff
+DIFFGZ
+dimen
+dimensionalities
+dimensionlessness
+dird
+dirdrvtv
+discretization
+discretize
+discretized
+discretizing
+discursion
+dists
+div
+divergenceless
+divg
+divthm
+DL
+dln
+dlnz
+DLUI
+documentclass
+Doetsch
+dotm
+dotmag
+dotre
+dp
+dpkg
+dr
+drvtv
+dS
+ds
+dsc
+dt
+du
+dup
+dv
+dvi
+DVLARCH
+dw
+dx
+dy
+dz
+dZ
+editional
+Edouard
+edu
+ee
+Ei
+eigdet
+eigen
+eigensolution
+eigensolutions
+eigenvalued
+eigenvector's
+electromagnetics
+elementaries
+elementarized
+elementary's
+elfs
+emph
+en
+encyclopedically
+enkindle
+ent
+ents
+enumt
+Ep
+Eq
+eqn
+eqnarray
+eqns
+equidistance
+er
+ere's
+erman
+esp
+etch's
+ete
+euler
+EWW
+exch
+exp
+expint
+exps
+extremum
+factorable
+factorials
+failproof
+fakeroot
+ff
+fh
+fi
+fillcolor
+fillstyle
+Fjeld
+foo
+footnotation
+Foresman
+formable
+forwardmost
+foundational
+fouri
+fourier
+Franciscus
+Friedberg
+Friedrich
+frullani
+Frullani's
+FSF's
+ftp
+genitival
+geo
+Geophys
+getCString
+getKey
+Gg
+gh
+ghostscript
+gibbs
+Gilbey
+gimbal
+gimbals
+Girolamo
+Giuliano
+GJ
+GJpart
+gjrank
+GJt
+GJtC
+gk
+Glenview
+Gorbag
+Gottfried
+Goursat
+gov
+gpl
+gra
+Graeco
+grat
+greek
+grep
+groff
+gt
+gtrsim
+Guillaume
+gv
+gz
+gzip
+Hafner
+Hamming's
+Hankel
+harvard
+hatchangle
+hatchwidth
+Hefferon
+Henk
+Hermann
+hert
+Hessenberg
+Hestenes
+Hh
+Hilburn
+hist
+Hm
+Hopman
+Houghton
+howpublished
+hq
+htm
+html
+http
+hubristic
+hy
+hypergeometric
+hyperlink
+i'j
+ia
+iA
+IAe
+iau
+iB
+ib
+ic
+iff
+ij
+ijk
+ijn
+ik
+iK
+ikj
+im
+IMC
+imn
+imum
+inbook
+Indep
+indicial
+infinitifold
+ing
+Insel
+int
+integ
+integrability
+integrand's
+integrands
+integrodifferential
+Interscience
+intinvz
+inttx
+invdet
+invertibility
+invxform
+ip
+Iref
+irreducibility
+irrotational
+iso
+Issai
+ited
+iu
+iuF
+iuf
+ively
+iwh
+iy
+iz
+j'k
+jargonal
+jb
+jh
+ji
+jik
+jj
+jj'k
+JJH
+jjk
+jk
+jki
+jmax
+jn
+Jochens
+Jolley
+joshua
+jp
+jr
+jT
+jU
+ju
+ka
+karlscalculus
+Kernighan
+Kernighan's
+kf
+kh
+Khamsi
+kI
+kij
+Kitaigorodskii
+kj
+kji
+kk
+kkt
+kkz
+kn
+Knopp
+Knopp's
+Knoppix
+Kohler's
+kp
+KRHB
+kron
+Krylov
+KSI
+kt
+kx
+kz
+l'H
+l'Hopital's
+L'Hopital's
+l'Hospital's
+L'Hospital's
+labelsep
+laborlawtalk
+Lapack
+laplacian
+laurent
+LCHandout
+leastsq
+Lebedev
+Leibnitz
+Leibnitz's
+lenny
+Leonhard
+lesssim
+letexec
+Lf
+lhopital
+libpoppler
+Libpoppler's
+Lightfoot
+lightgray
+lim
+linearalgebra
+linecolor
+linestyle
+linewidth
+lintian
+literal's
+LJDRP
+Ll
+lllcl
+lmodern
+ln
+localscalebox
+Lodovico
+logdef
+lor
+Lothar
+lppl
+lpr
+LUI
+LUP
+mA
+Maclaurin
+MacTutor
+Magnus
+mAI
+maintenv
+majorization
+majorize
+majorizes
+majorizing
+makeindex
+manpage
+manu
+marginalizing
+mathbb
+mathbios
+mathworld
+mC
+mcs
+mD
+mDI
+mdots
+Melc
+meromorphic
+metasyntactic
+meth
+mf
+mfd
+mG
+mH
+mI
+Microelectronic
+Mifflin
+min
+Mineola
+minimalistically
+minorization
+misc
+mislinking
+Mittra
+mj
+mk
+mK
+mkdiff
+mKI
+mkorig
+mL
+mLI
+mn
+mni
+Moivre
+Moivre's
+monophthongal
+Mosig
+Mosig's
+Moskowitz
+mP
+mPDLUI
+mPI
+mT
+MTC
+mtxalg
+mtxinv
+mU
+mUI
+mul
+multidigit
+multiline
+multisource
+multitarget
+mv
+mx
+myrad
+myscale
+mz
+nA
+na
+nAI
+narg
+nasa
+Navier
+Nayfeh
+nB
+nC
+neg
+nG
+nglish
+ni
+Niccol
+nint
+nj
+nk
+nn
+nonanalytic
+nonassociative
+nonassociativity
+noncommutivity
+nonconjugate
+Nonconvergent
+nondegenerate
+nondiagonalizability
+nondiagonalizable
+nonentire
+nonexchanges
+Nongeometrical
+nonintegral
+noninterchanges
+noninvertibility
+noninvertible
+nonisotropic
+nonobvious
+nonoptimal
+nonoverdetermined
+nonplanar
+nonrectangular
+nonredundant
+nonrepeating
+nonsquare
+nontrivially
+nonunderdetermined
+nonunique
+nonvector
+nonzeros
+northwestward
+nos
+Nostrand
+noth
+nounal
+nP
+nPDLUKS
+nPDLUU
+nQ
+NR
+nSI
+nT
+nTI
+Nussbaum
+nwh
+Oaxtepec
+od
+Offline
+oH
+oI
+oo
+opital
+opital's
+ops
+org
+orienteering
+orig
+ORIGTARGZ
+orthogonalize
+orthogonalizes
+orthogonalizing
+orthographically
+orthonormality
+orthonormalization
+orthonormalize
+orthonormalized
+orthonormalizes
+orthonormalizing
+orthonormally
+outlier
+overappreciated
+overdetermine
+overdetermined
+oW
+P'D'L'U'I
+p'i
+p'q
+paraboloids
+parochiality
+Parseval
+Parseval's
+pdf
+pdfetex
+PDFLaTeX
+PDFTricks
+PDLU
+PDLUI
+PDLUKS
+PDLUKSI
+Pedersen
+Penrose
+peri
+permutor
+permutor's
+permutors
+perp
+perpendicular's
+perpendicularity
+perspectived
+Pfufnik
+PGF
+pgn
+phasor
+phen
+Piscataway
+pj
+pl
+planetm
+PlanetMath
+planetmath
+plotpoints
+pm
+pointwise
+polyderiv
+polyderivz
+PostScript
+postulational
+Poynting
+Pp
+pq
+pre
+Precalculus
+predeclared
+pref
+preloads
+preshifted
+primeprf
+proddiv
+professional's
+ps
+pseudoinverse
+pseudoinversion
+psinv
+psselect
+pst
+pstricks
+PSTricks
+psutils
+pt
+PTR
+purec
+px
+pythag
+Q'H
+Q'R
+qp
+Qq
+QR
+QRinv
+qs
+qu
+quadeq
+quartic
+quartic's
+quartics
+quasielementaries
+quasielementary
+QUDS
+quintic
+Qw
+rA
+rad
+rAI
+Raj
+randomish
+Raphson
+Raton
+rB
+rC
+rcccl
+rcccll
+rCI
+rcl
+rclcl
+rclcrcl
+rclcrclcrcl
+rcr
+rcrccccl
+rcrcl
+rcrcrcl
+README
+reassociate
+Reassociating
+RECT
+redacted
+redaction
+Redlin
+reductio
+reinitialization
+RelSteps
+reorienting
+reorients
+rerotation
+rescale
+Rescripted
+resolvent
+reversibly
+Rewove
+rfakeroot
+rG
+rgen
+rI
+rj
+rK
+rKH
+rKI
+rKS
+rKSI
+rm
+rr
+rR
+rrr
+rrrr
+rS
+rSA
+rT
+rTI
+rU
+Rugen
+ruN
+S'X
+Sa
+Sadiku
+Saleem
+sarge
+Sarnow
+scalarization
+scalarize
+scalebox
+Schaum's
+schematical
+schur
+SCons
+sed
+Sedra
+Sep
+Shenk
+Shirer
+sid
+sid's
+Sii
+sinc
+sinh
+sinq
+sion
+smcvt
+Smedegaard
+snoitavired
+solenoidal
+Sommerfeld
+sosmath
+specif
+sph
+SPHER
+sq
+src
+SRW
+Ss
+stairstep
+stairsteps
+Stegun
+Stepney
+Stiefel
+stit
+stochastics
+stokesthm
+storable
+stormclouds
+Stratton
+Stroustrup
+struct
+Sturler
+subdiagonals
+subdir
+subdisciplines
+subgrid
+subgrid's
+subgridded
+subimposes
+submatrices
+submatrix
+subrogate
+subseries
+subsubset
+sudo
+summa
+summational
+susan
+SVD
+SVDinv
+SVDr
+sx
+symbology
+Tait
+tal's
+taries
+Tartaglia
+Tartaglia's
+tary
+taylor
+Tdefadd
+Tdefsc
+Tdefxchg
+te
+ters
+teTeX
+tetex
+tex
+TeXLive
+texlive
+thb
+Theo
+Theodor
+thopman
+TikZ
+tk
+tl
+tla
+tlb
+tlc
+tld
+toc
+train's
+Trans
+transform's
+triangleq
+triangulars
+trigonometrics
+trigs
+trop
+truncator
+truncators
+Tt
+Tullio
+TW
+txt
+ua
+ub
+uc
+UDS
+uf
+UI
+uk
+ul
+un
+uncalculatable
+unconfused
+unconservative
+uncontrived
+underappreciated
+underdetermined
+undergird
+unedifying
+unfundamental
+unintegrated
+unitarily
+unitless
+Univ
+unmix
+unpermuted
+unpersuaded
+unpolished
+unreadably
+unremarkably
+unrooted
+unsuggestive
+unsureness
+untainted
+unverifiable
+unvisualizable
+unwarrantedly
+uoguelph
+URL
+usepackage
+usr
+UT
+Uu
+uv
+Vallejo
+vanBladel
+vari
+vcalc
+VD
+vdVorst
+verdate
+Vieta
+Vieta's
+von
+Vorst
+vspace
+Vt
+Vv
+wave's
+Weisstein
+Weisstein's
+Werror
+Weyl
+wg
+wh
+wheth
+Wikimedia
+wikip
+wikipedia
+Wikipedians
+Wilbraham
+Ww
+www
+x'y'z
+x'z'y
+XA
+xform
+xg
+xplora
+xs
+xt
+xunit
+XY
+y'x'z
+y'z'x
+Yan
+york
+ys
+yt
+yunit
+Yy
+z'x'y
+z'y'x
+Zandt's
+zcat
+Zz
 /dev/null
+++ derivations0.52.20100310/helper/extendchangelog
@@ 0,0 +1,86 @@
+#! /usr/bin/perl
+use warnings;
+use strict;
+use integer;
+use FindBin;
+use lib $FindBin::RealBin;
+use Def;
+
+# This script adds empty new entries at the changelogs' tops.
+
+our $usage = <;
+ close CL;
+}
+if ( $do_cld ) {
+ open CL, '<', $changelog_deb;
+ @cld = ;
+ close CL;
+}
+
+# Add changelog entries.
+my $ver = "${Def::basever}.$verdate";
+my $ver_deb = "${ver}1";
+unshift @cl , < $date
+
+END
+unshift @cld, < $date
+
+END
+
+# Write the changelogs out.
+if ( $do_cl ) {
+ open CL, '>', $changelog;
+ print CL @cl;
+ close CL;
+}
+if ( $do_cld ) {
+ open CL, '>', $changelog_deb;
+ print CL @cld;
+ close CL;
+}
+
 /dev/null
+++ derivations0.52.20100310/helper/letexec
@@ 0,0 +1,14 @@
+#! /bin/bash e
+
+# The .diff.gz patch format does not preserve executable flags on files.
+# This is fine, but run the present script as
+#
+# bash helper/letexec
+#
+# after unpacking the source to let the appropriate helpers be
+# executable.
+
+for A in $( find $( dirname $0 ) mindepth 1 maxdepth 1 type f ) ; do
+ sed ne '/^#!/q0;q1' $A && chmod a+x $A
+done
+
 /dev/null
+++ derivations0.52.20100310/helper/deprecated/opennewsource
@@ 0,0 +1,22 @@
+
+# This bash shell pseudoscript opens a new development source tree.
+
+if [[ $MAINTENV != derivations ]] ; then
+ echo 1>&2 "Please run helper/maintenv first."
+ false
+elif [[ ! $PWD ef $E/$P$V.$D1 ]] ; then
+ echo 1>&2 "Please run from the top source directory $E/$D1."
+ false
+elif [[ e $P$V.$D2 ]] ; then
+ echo 1>&2 "Sorry, but $P$V.$D2 already exists."
+ false
+else
+ # Open a full new upstream tree.
+ cd ..
+ cp a $P$V.{$D1,$D2}
+ cd $P$V.$D2
+ bash  helper/letexec
+ helper/extendchangelog
+ helper/updatedate c $D2
+fi
+
 /dev/null
+++ derivations0.52.20100310/helper/deprecated/maintenv
@@ 0,0 +1,65 @@
+
+# This bash shell pseudoscript sets some useful environment
+# variables for derivations package maintenance. You run it
+# by ". helper/maintenv".
+#
+# Before running this pseudoscript you must set the following:
+#
+# D1 the old version date (for example, 19700101)
+# D2 the prospective new version date
+#
+# (It would be neater to make this and the associated pseudoscripts real
+# scripts, but it would also be less useful from the author's point of
+# view. Remember that this is just a development helper. The reason to
+# use pseudoscripts is to keep the various shell variables and functions
+# in the shell's global namespace where the maintainer can manipulate
+# them directly. If you don't want your shell's namespace cluttered,
+# you can spawn a subshell before invoking the pseudoscript.)
+
+if [[ $D1 == ''  $D2 == '' ]] ; then
+ echo 1>&2 "Please set D1 and D2 before running this pseudoscript."
+ false
+elif echo n $VD  grep q '[^09]' ; then
+ echo 1>&2 "The \$VD must be a nonnegative integer."
+ false
+elif [[ $D1 == $D2 && ( $VD == ''  $(( $VD > 1 )) == 0 ) ]] ; then
+ echo 1>&2 "Please set either \$VD > 1 or \$D2 != \$D1."
+ false
+else
+
+ DVLARCH=i386 # the maintainer's own machine architecture
+ E=~/der # the maintainer's development superdirectory
+ P=derivations # the package name
+ V=0.5 # the base version number
+
+ # Derive additional variables.
+ if [[ $VD == '' ]] ; then VD=1 ; fi
+
+ # Define a shell function to build a diff.
+ function mkdiff {
+ if [[ x helper/buffesrc ]] ; then
+ if [[ $(( $VD > 1 )) == 0 ]] ; then
+ DIFF=$E/$P$V.$D2.diff
+ (
+ cd ..
+ diff ruN $P$V.{$D1,$D2} >$DIFF
+ )
+ else
+ DIFF=$E/$P$V.$D1$VD.diff
+ (
+ cd ..
+ diff ruN $P$V.$D1{$(( $VD  1 )),} >$DIFF
+ )
+ fi
+ else
+ echo 1>&2 "You must run this shell function from the top source"
+ echo 1>&2 "directory, and helper/buffesrc must be executable."
+ false
+ fi
+ }
+
+ # Set a flag to mark that the pseudoscript has been run.
+ MAINTENV=$P
+
+fi
+
 /dev/null
+++ derivations0.52.20100310/helper/deprecated/README
@@ 0,0 +1,7 @@
+
+The author thought that these files were a good idea
+when he made them, but they have not proven as useful as
+he had thought they would. The maintenv script might
+still be useful. The other files probably are less
+useful, but for now they are kept here.
+
 /dev/null
+++ derivations0.52.20100310/helper/deprecated/closesource
@@ 0,0 +1,68 @@
+
+# This bash shell pseudoscript closes the development source tree for
+# subsequent building. Note that you can close the source as many times
+# as you like; you needn't (and shouldn't) reopen it each time.
+
+if [[ $MAINTENV != derivations ]] ; then
+ echo 1>&2 "Please run helper/maintenv first."
+ false
+elif [[ ! $PWD ef $E/$P$V.$D2 ]] ; then
+ echo 1>&2 "Please run from the top source directory $E/$D2."
+ false
+else
+
+ if [[ $D3 == '' ]] ; then D3=$D2 ; fi
+ if [[ $VD == '' ]] ; then VD=1 ; fi
+ if echo n $VD  grep q '[^09]' ; then
+ echo 1>&2 "The \$VD must be a nonnegative integer."
+ false
+ else
+
+ if [[ $(( $VD > 1 )) == 0 ]] ; then
+
+ if [[ $D3 != $D2 && e $E/$D3 ]] ; then
+ echo 1>&2 "Sorry, but $E/$D3 already exists."
+ false
+ else
+ if [[ $D3 != $D2 ]] ; then
+ cd ..
+ mv $P$V.{$D2,$D3}
+ rm f $DIFF
+ cd $P$V.$D3
+ fi
+ helper/updatedate $D3
+ D2=$D3
+ helper/buffesrc >/dev/null
+ debian/rules origforce
+ mkdiff
+ fi
+
+ else
+
+ if [[ e $E/$P$V.$D1$(( $VD  1 )) && $D2 != $D1 ]] ; then
+ echo 1>&2 "Sorry, but \$D2 != \$D1,"
+ echo 1>&2 "yet $E/$P$V.$D1$(( $VD  1 )) already exists."
+ false
+ else
+ rm Rf $E/$P$V.$D1$(( $VD  1 ))
+ if [[ $D2 != $D1 ]] ; then
+ cd ..
+ mv $P$V.$D1{,$(( $VD  1 ))}
+ mv $P$V.{$D2,$D1}
+ cd $P$V.$D1
+ fi
+ helper/updatedebver $VD
+ helper/updatedate $D1
+ helper/updatedate ds $D2
+ D2=$D1
+ D3=$D1
+ helper/buffesrc >/dev/null
+ mkdiff
+ fi
+
+ fi
+
+ fi
+
+fi
+
diff pruN 0.53.201204142/doc/changelog 0.56.20180123.12/doc/changelog
 0.53.201204142/doc/changelog 20120415 16:55:02.000000000 +0000
+++ 0.56.20180123.12/doc/changelog 20190107 02:08:45.000000000 +0000
@@ 1,3 +1,415 @@
+derivations (0.56.20180123.1)
+
+ * Fixed a false PDF titlebar by frustrating Libpoppler's GooString
+ destructor from btool/PDF/update_catalog.cc. (This is a
+ workaround. It seems to address the problem but, should the
+ problem continue to emerge sporadically, a better fix might be
+ needed. We shall see.)
+ * Deleted spurious source files of the Quilt patch manager.
+ * Changed nothing else. Observed that this revision introduces no
+ new version of the book, for, other than perhaps the legend in
+ the PDF viewer's title bar, the book will appear the same to
+ its readers.
+
+  Thaddeus H. Black Sat, 05 Jan 2019 00:00:00 +0000
+
+derivations (0.56.20180123)
+
+ * Ceased to state on the reverse of the title page that the book is
+ a prepublished draft, for it no longer is. Accordingly,
+ incremented the revision number to 0.56. (The book remains
+ incomplete. Maybe it always will, but it is now after 34 years
+ complete enough to drop the prepublication notice.)
+ * Added "Reliance" to pref.tex. Revised "A curiosity." Removed the
+ prepublication notice from the preface's head.
+ * In inttx.tex:
+ + Changed a few = signs to \equiv.
+ + Added the new section "Reversal of the independent variable."
+ + Based the table of antiderivatives by prepending a zeroth-order
+ antiderivative.
+ * In fours.tex, generally though not greatly revised "Time, space and
+ frequency."
+ * Generally revised fouri.tex, especially in its
+ subsections "Metaduality" and "Convolution and correlation." Also
+ in the chapter:
+ + Corrected false Fourier transforms in "Convolution and
+ correlation."
+ + Corrected a false Laplace transform in the table "Laplace
+ transform pairs."
+ * In fouri.tex, noted that the interpretation of the case of the
+ Fourier transform of a function whose argument is complex remained
+ unclear to the writer.
+ * Deleted the very incomplete specf.tex.
+ * Generally revised and corrected prob.tex.
+ * Updated and condensed stub.tex. Changed its title (and might
+ change it again).
+ * Polished the final paragraph of conclu.tex.
+ * Polished the introduction to hex.tex.
+ * Resolved references in hist.tex.
+ * Cleared most of the book's remaining commenttagged action items.
+ * Edited the rest of the book in further, minor ways.
+
+  Thaddeus H. Black Tue, 23 Jan 2018 00:00:00 +0000
+
+derivations (0.55.20180117)
+
+ * In pref.tex:
+ + Polished "A curiosity" to frame it better in the reader's
+ context.
+ + Rewrote the disclaimer of shortcomings in "Acknowledgements."
+ * In alggeo.tex, "The Pythagorean theorem," clarified Euclid's
+ footnote.
+ * In trig.tex, "Trigonometric functions of the hour angles," added
+ the quarters of a right angle to the table.
+ * Refined the prose of drvtv.tex, "Remarks on the Leibnitz notation."
+ * Revised the prose of cexp.tex in several relatively minor ways,
+ especially in "The actuality of complex quantities."
+ * In noth.tex, "Averages," clarified the circumstances under which
+ the masons' example might prefer the parallel to the serial
+ average.
+ * In integ.tex, revised several points of "The length of a parabola."
+ * In taylor.tex, revised the prose of the last two paragraphs
+ of "Extrema over a complex domain."
+ * In inttx.tex, "Pythagorean radicals," fixed the S_4 calculation,
+ whose result had been right but whose details as previously printed
+ were nonsensical.
+ * In mtxinv.tex:
+ + In "The general solution," extended and substantially rewrote the
+ footnote on mantissas.
+ + Slightly improved the prose of "Complex vector inequalities" and
+ its subsections.
+ * In vector.tex, added a reference from "Multiplication" back
+ to "Reorientation."
+ * Removed from the start of conclu.tex the caution that the chapter
+ had been an early draft.
+ * In greek.tex, framed the presentation more explicitly as from
+ an Englishlanguage perspective.
+ * Cleared most of the book's remaining commenttagged action items
+ through the end of part II. (The tag is diagn without the
+ hyphens.)
+ * Edited the rest of the book in further, minor ways.
+
+  Thaddeus H. Black Wed, 17 Jan 2018 00:00:00 +0000
+
+derivations (0.55.20170703)
+
+ * In integ.tex, formally defined Heaviside's u(0) = 1.
+ * In inttx.tex, specially displayed the n = 1 and n = 2 cases in the
+ antiderivative table. Improved the general case's notation.
+ * In fours.tex:
+ + Slightly improved the introduction's style (though that still
+ probably needs some more work).
+ + Added to "Parseval's principle" a forward reference
+ to the next chapter's "Parseval's theorem."
+ + Revised and extended "Time, space and frequency."
+ + Edited the rest of the chapter in various small and medium ways,
+ too many to list here. Deleted unnecessary adjectives and adverbs
+ throughout (adverbs are often necessary than adjectives in
+ general, but the first half of this chapter seems to have had an
+ unusual number of unnecessary adverbs for some reason).
+ + Elevated and revised a footnote in "Derivation of the
+ Fourier-coefficient formula," raising it to make a new paragraph with
+ which to end the subsection. (The old footnote and new paragraph
+ regard convergence.)
+ + Renamed the section on pulses as "Several useful pulses." Added
+ to it the raised-cosine pulse and partly explained why the
+ section's nonanalytic pulses are useful to discretize a signal.
+ * Generally revised and extended the existing draft of fouri.tex.
+ Recognized it now to be a complete chapter rather than merely an
+ early draft.
+ * Revised the draftstub specf.tex a bit (but there still isn't
+ much there).
+ * In conclu.tex, added to "Foundations athwart intuition" a reference
+ into prob.tex.
+ * Edited the rest of the book in further, minor ways.
+ * Observed that the author has now reviewed the whole manuscript.
+
+  Thaddeus H. Black Mon, 03 Jul 2017 00:00:00 +0000
+
+derivations (0.55.20170620)
+
+ * In mtxinv.tex:
+ + Reorganized, renamed, rereasoned and rewrote the section on
+ triangle inequalities. In that section, treated the Schwarz
+ inequality explicitly.
+ + Split a paragraph of "Inverting the square matrix" in two. In
+ the same section, tentatively footnoted an outline of additional,
+ professionalesque rigor.
+ + Rewrote "The general solution."
+ + Slightly clarified "The GaussJordan kernel formula."
+ + Corrected and retitled material regarding GramSchmidt and
+ orthogonal complements.
+ * In eigen.tex, "Diagonalization," promoted an explanatory footnote
+ to a parenthesized explanation. Numbered the explained equation.
+ * In vector.tex:
+ + Revised the chapter's introduction.
+ + Exampled a somewhat nonobvious calculation in "The cross product."
+ * In vcalc.tex:
+ + In "The Laplacian and other secondorder derivatives," extended
+ the footnote on American versus British symbology for
+ the Laplacian.
+ + Added an alternate reading to one entry of
+ the "Metric coefficients" table.
+ + Reworded the nexttolast paragraph of "Nonrectangular notation."
+ * Divided conclu.tex into sections. Revised and extended
+ the chapter.
+ * Added a few decimal numerals and/or spelledout names of numbers as
+ alternatives to hexadecimal.
+ * Edited the rest of the book in further, minor ways.
+ * Observed that the author has now reviewed the manuscript through
+ the end of the manuscript's part II.
+
+  Thaddeus H. Black Tue, 20 Jun 2017 00:00:00 +0000
+
+derivations (0.55.20170612)
+
+ * Throughout, in source expressions like "\appendix~\ref{APPENDIX}.",
+ changed the "." to "\@."
+ * In the matrix chapters, changed "correct order" of indices
+ to "proper sequence" thereof.
+ * Retitled the chapter drvtv.tex as "The derivative and
+ its incidents."
+ * In pref.tex, gave Alma Noe his own paragraph.
+ * In intro.tex, extended "Rigor to forestall error."
+ * In alggeo.tex:
+ + Renamed "Sums of powers" as "sums of exponents."
+ + Added to "Variations on the geometric series" a forward reference
+ to taylor.tex, "The powerseries expansion of 1/(1z)^(n+1)."
+ * In cexp.tex:
+ + Rewrote the chapter's introduction.
+ + Improved notation in "The real exponential."
+ + Extended the explanation of the boundary between domains
+ in "Euler's formula."
+ + In "The hyperbolic functions," rewrote the prose on extension to
+ complex phi. Restored a missing asterisk for conjugation in a
+ footnote.
+ + In "Inverse complex trigonometrics," filled an omitted step of
+ algebra.
+ + Revised the first paragraph of "Derivatives of sine and cosine."
+ + Deleted the sister'shandwriting analogy from "The actuality of
+ complex quantities."
+ * In noth.tex:
+ + In "Rational and irrational numbers," restricted the ratios to
+ have positive denominators.
+ + In "The fundamental theorem of algebra," outlined a potential
+ response to a hypothetical purist's objection; and explained why
+ no alternate roots are possible.
+ + To make the example of "Serial and parallel addition," et seq.,
+ more realistic, halved the bricklayers' productivity.
+ + Deleted the digression on business and the footnote
+ on Congressional apportionment from "Averages."
+ * In integ.tex:
+ + Revised and substantially expanded "Areas and volumes."
+ + Changed the shape of the cone's base in the figure "The volume of
+ a cone".
+ + Revised the final paragraph of "Checking an integration" to
+ comport with the last two points.
+ + Added some exercises.
+ * In taylor.tex:
+ + Slightly improved the wording of the chapter's introduction.
+ + In "The powerseries expansion of 1/(1z)^(n+1)," improved
+ notation, changed some indices and explained a few points better.
+ + Supplied a missing sign in "Shifting a power series'
+ expansion point" (and checked the corrected formula using a
+ computercalculated test case).
+ + Revised and extended "Branch points." Added two figures.
+ + Revised and extended "Extrema over a complex domain."
+ + Added a quasijustification of the stairstepping approximation
+ to "Integrating along the contour."
+ + Added another example to "Calculation outside the fast convergence
+ domain," this example in the narratives rather than the footnotes.
+ In a footnote in the same subsection, corrected an instance of
+ arctan(omega) to read arctan(zeta) as it ought.
+ * In inttx.tex:
+ + Added the new sections "Integration by the manipulation of a
+ Pythagorean radical" and "Trial derivatives."
+ + Split "An example" off as a separate subsection from "Repeated
+ poles." In those subsections, clarified the explanation of
+ shadow poles.
+ + Otherwise in "Integration by partialfraction expansion,"
+ clarified a few points and supplied a missing subscript.
+ + Slightly modified the conditions of "Frullani's integral."
+ + Corrected a significant error in the table "Antiderivatives of
+ products of exponentials, powers and logarithms," and in its
+ supporting derivation, now multiplying by k rather than dividing.
+ Reformatted the table.
+ + In "Integrating products of exponentials, powers and logarithms,"
+ commented out a questionable L'Hospitalbased explanation.
+ * In cubic.tex, added an alternate explanation regarding an
+ edge case.
+ * In matrix.tex, "The truncation operator," corrected a misnotation of
+ the general identity matrix, which runs from negative infinity
+ rather than from 1.
+ * In gjrank.tex:
+ + In "The elementary similarity transformation," improved several
+ instances of awkward and/or imprecise English.
+ + In the table "Some elementary similarity transformations,"
+ slightly extended the rule by which scaling similarity operations
+ affect unit triangulars.
+ + Explained more in "The GaussJordan decomposition."
+ + Revised the heretofore confusing subsection "Decomposing an
+ extended operator." Especially, clarified a paragraph regarding
+ fullrank extended operators.
+ + Slightly revised and clarified "A logical maneuver."
+ + In "General matrix rank and its uniqueness," explicitly observed
+ that transposition/adjointing does not alter rank; and then
+ referred to the fact in "Full column rank and...." In the latter
+ section, clarified that the last paragraph speaks specifically of
+ square matrices.
+ * Added an early draft of the new concluding Remarks, conclu.tex.
+ * Edited the rest of the book in further, minor ways.
+ * Observed that, after years of inactivity, the author has now
+ reviewed the manuscript through the end of chapter 12,
+ gjrank.tex, "Matrix rank and the GaussJordan decomposition" (and
+ has reviewed the four appendices, too). These first 12 chapters
+ form the applied mathematical foundation for the whole book.
+ Significant improvements to the 12 chapters having been made, the
+ chapters have now perhaps been put in as good an order as the author
+ is likely to put them. The chapters seem finally to make a mostly
+ adequate foundation for the rest of the book.
+
+  Thaddeus H. Black Mon, 12 Jun 2017 00:00:00 +0000
+
+derivations (0.55.20170530)
+
+ * Again because of the interval between public releases, advanced the
+ book's version number to 0.55.
+ * Noted here that the overall theme of this revision is
+ mathematicsphilosophical. A book like this, which respects but
+ nevertheless deliberately eschews the professional mathematician's
+ rigor, and which covers such a broad mathematical territory, is
+ always in danger of incurring censure for sloppiness. Much of the
+ potential censure is perhaps more mathematicscultural than
+ actually helpful but it must nevertheless be taken seriously by the
+ writer. In this revision, the first four chapters, where much or
+ most of the book's philosophy is, are overhauled  especially in
+ their more philosophical passages.
+ * Slightly condensed the preface. Admitted there that the book's
+ shortcomings remain the author's own.
+ * Converted to use AMSmath's macro to style the combinatorial
+ notation.
+ * Changed "Ch." and "Chs." to "chapter" and "chapters" throughout.
+ * Shifted the positions of many adverbs to more standard spots in
+ their sentences.
+ * Because of TeX's miskerning of the italic "f", retypeset "d^kf"
+ as "d^k\!f" throughout (where anything might take the place of
+ the k but not of the f).
+ * Moved the proof of the Pythagorean theorem up to lead off chapter 1,
+ thus starting the book with something to grab the reader's attention
+ before descending into the first chapter's philosophical slog.
+ * In intro.tex:
+ + In "Axiom and definition," bridged the first two paragraphs with
+ an example.
+ + Elevated "Mathematical extension" from a subsection to a section.
+ + To "Mathematical Platonism," added a quip by Wittgenstein, a
+ quote by Noe, and some new/revised text to link these into the
+ narrative. Also, appropriately to such a subsection, added a
+ quote by Plato!
+ + Split off the new "Methods, notations, propositions and premises"
+ from the existing "Mathematical Platonism."
+ * In alggeo.tex:
+ + Improved several points of "Functions."
+ + Deleted from "Power series and analytic functions (preview)" an
+ inadvertently meaningless statement about sums of power series.
+ Also deleted the accompanying footnote.
+ + Combined all the triangle sections, reducing these sections to
+ subsections. Added a second proof of the Pythagorean theorem and
+ a supporting proof of Euclid's.
+ * In trig.tex:
+ + In "Definitions," rewrote the paragraph about radians, splitting
+ it into two paragraphs. Added an arc length to and otherwise
+ improved the figure.
+ + In "Scalars, vectors, and vector notation," added a paragraph to
+ explain how a nonphysical distance can still be a mathematical
+ distance.
+ * In drvtv.tex:
+ + Extensively revised "Considerations of the Leibnitz notation."
+ Also, split off the last part of that subsection to form the new
+ subsection "Higher-order derivatives."
+ + Added more rows to the display of Pascal's triangle. Also, added
+ a display in decimal notation.
+ + In "Combinations and permutations," added a long footnote on
+ mathematical philosophy.
+ + To "Complex powers of numbers near unity," added a new paragraph
+ to lend the reader more confidence in the subsection's chief
+ assumption.
+ + In "The Leibnitz notation," expanded the already long footnote on
+ individual Leibnitz elements, adding quotes of Robinson and
+ Derbyshire. Made other changes, too.
+ + Then, extracted the long footnote and some other material and used
+ it to form the new subsection "Remarks on the Leibnitz notation."
+ + In "The derivative of a function of a complex variable," deleted
+ most of the footnote on the unbalanced definition. Generally
+ revised the subsection.
+ + Added the new subsection, "An alternate definition of the
+ derivative."
+ + Improved notation in "The Newton-Raphson iteration."
+ * In cexp.tex, slightly improved the introduction's wording.
+ * Improved notation in mtxinv.tex, "The multivariate Newton-Raphson
+ iteration."
+ * Generally revised hex.tex.
+ * Edited the rest of the book in further, minor ways.
+
+  Thaddeus H. Black Tue, 30 May 2017 00:00:00 +0000
+
+derivations (0.54.20170508)
+
+ * Integrated various old, half-remembered development branches to
+ form this release.
+ * Acknowledged that the changelog stanza you are reading is probably
+ incomplete.
+ * Found the new place at which TeXlive keeps LaTeX's book.cls.
+ * Revised README and the man page, improving style and deleting
+ obsolete text.
+
+  Thaddeus H. Black Mon, 08 May 2017 00:00:00 +0000
+
+derivations (0.54.20120813)
+
+ * Because of the interval, 2009 to 2012, between the last two public
+ releases, advanced the book's version number to 0.54.
+ * Rewrote and extended the preface and introduction.
+ * Reviewed and slightly revised all the appendices.
+ * Shifted the positions of several adverbs in their sentences,
+ scattered across the book, usually to precede their verbs.
+ * Changed several, though not all, instances of "although"
+ to "though."
+ * Added to the narrative the birth and death dates of some
+ people quoted.
+ * Reformed several, incorrect instances of "thus" and "hence" used as
+ conjunctions (for, according to Webster, these words are adverbs
+ only, despite that "whence" can be a conjunction).
+ * In tex/alggeo.tex (Classical algebra and geometry):
+ + Rewrote the chapter's introduction. Significantly, among others,
+ added the adverb "fast" to the sentence, "The book starts fast
+ with these."
+ + Filled an oversight re the ordering of 1/a and 1/b
+ in "Inequality."
+ + Added the new subsection "Power-related inequality."
+ + Generally revised "Constants and variables."
+ + In "Functions," split the one-line definition of the inverse
+ function into two lines. Otherwise refined the section's text.
+ + In "Rectangular complex multiplication," commented on sqrt(i).
+ + Make several, smaller changes.
+ * In tex/drvtv.tex (The derivative),
+ + Reluctantly granted at least the real form of the unbalanced
+ definition of the derivative an equation number.
+ + Generally revised the subsection "The Leibnitz notation,"
+ splitting it into two, separate subsections, naming the second of
+ the two "Considerations of the Leibnitz notation."
+ * Edited the rest of the book in further, minor ways.
+
+  Thaddeus H. Black Mon, 13 Aug 2012 00:00:00 +0000
+
+derivations (0.53.20120811)
+
+ * In tex/specf.tex (Introduction to special functions),
+ corrected the title of the section "The Gaussian pulse and
+ its moments," and rewrote the section, as far as it had gone.
+ * Edited the rest of the book in further, minor ways.
+
+  Thaddeus H. Black Sat, 11 Aug 2012 00:00:00 +0000
+
derivations (0.53.20120414)
* Generally reorganized the book's development toward, presentation
diff pruN 0.53.201204142/doc/derivations.7 0.56.20180123.12/doc/derivations.7
 0.53.201204142/doc/derivations.7 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/doc/derivations.7 20180123 03:18:47.000000000 +0000
@@ 1,4 +1,4 @@
.TH "DERIVATIONS" 7 "10 March 2010" \
+.TH "DERIVATIONS" 7 "23 January 2018" \
"Thaddeus H. Black" "Derivations of Applied Mathematics"
.SH "NAME"
derivations \ book: Derivations of Applied Mathematics
@@ 6,50 +6,44 @@ derivations \ book: Derivations of Appl
.\" 
.\"
.SH "DESCRIPTION"
Understandably, program sources rarely derive the mathematical formulas
they use. Not wishing to take the formulas on faith, a user might
nevertheless reasonably wish to see such formulas somewhere derived.
+For various valid reasons, open-source program sources rarely derive
+the mathematical formulas they use. A user, nevertheless  not
+wishing to take such formulas on faith  might wish to see such
+formulas
+.I somewhere
+derived.
.PP
.I Derivations of Applied Mathematics
is a book which documents and derives many of the mathematical formulas
and methods implemented in free software or used in science and
engineering generally. It documents and derives the Taylor series (used
to calculate trigonometrics), the NewtonRaphson method (used to
calculate square roots), the Pythagorean theorem (used to calculate
distances) and many others.
+is a book that derives, and
+documents, many of the mathematical formulas and methods open-source
+programs use, and indeed many of the formulas and methods used in
+science and engineering generally. For example, it derives and
+documents the Taylor series (used to calculate trigonometrics), the
+Newton-Raphson method (used to calculate square roots), the Pythagorean
+theorem (used to calculate distances) and many others.
.\"
.\" 
.\"
.SH "READING THE BOOK"
Among other ways, you can read the book on your computer screen by
opening the file
.I /usr/share/doc/derivations/derivations.ps.gz
with the
.BR gv (1)
program under
.BR X (7).
To print the book on a standard postscript printer, just
.BR zcat (1)
then
.BR lpr (1)
the same file.
+.I /usr/share/doc/derivations/derivations.pdf
+with a PDF reader like (for example)
+.BR evince (1).
.\"
.\" 
.\"
.SH "FILES"
.PD 0
.TP
.I /usr/share/doc/derivations/derivations.ps.gz
the book in postscript format
.TP
.I /usr/share/doc/derivations/derivations.pdf.gz
+.I /usr/share/doc/derivations/derivations.pdf
the book in PDF
.PD
.\"
.\" 
.\"
.SH "BUGS"
The book is a work in progress.
+The book remains a work in progress.
.\"
.\" 
.\"
@@ 57,15 +51,15 @@ The book is a work in progress.
The book and this manpage are written by Thaddeus H. Black, who also
maintains the Debian package
.I derivations
in which they are distributed. Users who need to contact the author in
his role as Debian package maintainer can reach him at .
However, most email will naturally be about the book itself: this
should be sent to .
+in which Debian distributes them. Users who need to contact the author
+in his role as Debian package maintainer can reach him
+at . However, most email will naturally be about the
+book itself: this should be sent to .
.\"
.\" 
.\"
.SH "COPYLEFT"
Copyright (C) 1983\2010 Thaddeus H. Black.
+Copyright (C) 1983\2018 Thaddeus H. Black.
.PP
The book, this manpage and the entire
.I derivations
@@ 74,14 +68,4 @@ them under the terms of the GNU General
.\"
.\" 
.\"
.SH "SEE ALSO"
.BR gv (1)
.RI [ gv ],
.BR zcat (1)
.RI [ gzip ],
.BR psselect (1)
.RI [ psutils ],
.BR lpr (1)
.RI [ lpr ],
.BR octave (1)
.RI [ octave ]
+.\" .SH "SEE ALSO"
diff pruN 0.53.201204142/doc/README 0.56.20180123.12/doc/README
 0.53.201204142/doc/README 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/doc/README 20170509 13:09:03.000000000 +0000
@@ 1,3 +1,3 @@
This directory contains package documentation.
+This directory contains upstream package documentation.
diff pruN 0.53.201204142/README 0.56.20180123.12/README
 0.53.201204142/README 20120415 16:52:31.000000000 +0000
+++ 0.56.20180123.12/README 20190106 18:30:09.000000000 +0000
@@ 1,34 +1,35 @@

+
Derivations of Applied Mathematics

+
Understandably, program sources rarely derive the mathematical formulas
they use. Not wishing to take the formulas on faith, a user might
nevertheless reasonably wish to see such formulas somewhere derived.

Derivations of Applied Mathematics is a book which documents and
derives many of the mathematical formulas and methods implemented
in free software or used in science and engineering generally.
It documents and derives the Taylor series (used to calculate
trigonometrics), the Newton-Raphson method (used to calculate square
roots), the Pythagorean theorem (used to calculate distances) and
many others.

Among other ways, you can read the book on your computer screen by
opening the file /usr/share/doc/derivations/derivations.ps.gz with the
gv(1) program under X(7). To print the book on a standard postscript
printer, just zcat(1) then lpr(1) the same file.

The book is written by Thaddeus H. Black, who also maintains the Debian
package 'derivations' in which the book is distributed. Users who
need to contact the author in his role as Debian package maintainer can
reach him at . However, most email will naturally be
about the book itself: this should be sent to .
+For various valid reasons, open-source program sources rarely derive
+the mathematical formulas they use. A user, nevertheless  not
+wishing to take such formulas on faith  might wish to see such
+formulas *somewhere* derived.
+
+Derivations of Applied Mathematics is a book that derives, and
+documents, many of the mathematical formulas and methods open-source
+programs use, and indeed many of the formulas and methods used in
+science and engineering generally. For example, it derives and
+documents the Taylor series (used to calculate trigonometrics), the
+Newton-Raphson method (used to calculate square roots), the Pythagorean
+theorem (used to calculate distances) and many others.
+
+The book's format is PDF.
+
+Comments and questions regarding the book itself can be directed to the
+author at .
+
+Comments and questions regarding the book's packaging and distribution
+can be directed to the packager/distributor who has supplied you the
+book. For example, if Debian has supplied you the book, then you can
+contact the packager (who for this book happens to be the author
+himself) at .

+
Copyright (C) 19832010 Thaddeus H. Black
+Copyright (C) 19832018 Thaddeus H. Black
License:
@@ 50,18 +51,7 @@ License:
On Debian systems, the complete text of the GNU General Public License
can be found in `/usr/share/commonlicenses/GPL2'.
The tex/xkeyval.{sty,tex} and associated files
are Copyright (C) 20042008 Hendri Adriaens and licensed as follows.

 This work may be distributed and/or modified under the
 conditions of the LaTeX Project Public License, either version 1.3
 of this license or (at your option) any later version.
 The latest version of this license is in
 http://www.latexproject.org/lppl.txt
 and version 1.3 or later is part of all distributions of LaTeX
 version 2003/12/01 or later.


+
Thaddeus H. Black
Wed, 10 Mar 2010 00:00:00 +0000
+Tue, 23 Jan 2018 00:00:00 +0000
diff pruN 0.53.201204142/tex/alggeo.tex 0.56.20180123.12/tex/alggeo.tex
 0.53.201204142/tex/alggeo.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/alggeo.tex 20180114 18:27:34.000000000 +0000
@@ 4,14 +4,25 @@
\label{alggeo}
\index{classical algebra}
\index{algebra!classical}
+\index{classical geometry}
+\index{geometry!classical}
One learns arithmetic and the simplest elements of classical algebra and
geometry as a child. Few readers presumably, on the present book's
tier, would wish the book to begin with a treatment of $1+1=2$, or of
how to solve $3x - 2 = 7$, or of the formal consequences of the
congruence of the several angles produced when a line intersects some
parallels. However, there are some basic points which do seem worth
touching. The book starts with these.
+\index{arithmetic}
+\index{angle!congruent}
+\index{parallel lines}
+\index{line!parallel}
+Probably every book must suppose something of its reader. This book
+supposes, or affects to suppose, little other than that its reader reads
+English and has a strong aptitude for mathematics, but it does assume
+that the reader has learned the simplest elements of classical
+arithmetic, algebra and geometry from his youth: that $1+1=2$; why
+$(-2)(-3)=6$; what it means when a letter like~$x$ stands in the place of
+an unspecified number; the technique to solve that $3x - 2 = 7$; how to
+read the functional notation $f(x)$; which quantity a square root~$\sqrt
+x$ is; what to make of the several congruent angles that attend a line
+when the line intersects some parallels; and so on. Even so, some basic
+points of algebra and geometry seem worth touching briefly here. The
+book starts fast with these.
% 
@@ 27,8 +38,8 @@ This section states some arithmetical ru
\index{commutivity}
\index{associativity}
\index{distributivity}
\index{identity, arithmetic}
\index{inversion, arithmetic}
+\index{identity!arithmetic}
+\index{inversion!arithmetic}
\index{zero}
\index{$0$ (zero)}
\index{one}
@@ 39,7 +50,20 @@ This section states some arithmetical ru
\index{box}
\index{area}
\index{volume}
Table~\ref{alggeo:222:table} lists several arithmetical rules, each of
+\index{identifying property}
+\index{inversion!multiplicative}
+\index{commutivity!multiplicative}
+\index{associativity!multiplicative}
+\index{identity!multiplicative}
+\index{inversion!additive}
+\index{commutivity!additive}
+\index{associativity!additive}
+\index{identity!additive}
+\index{distributivity}
+Table~\ref{alggeo:222:table} lists several arithmetical rules,\footnote{%
+ \cite[\S~1.2]{Shilov}\cite[chapter~1]{Spiegel}
+}
+each of
which applies not only to real numbers but equally to the complex
numbers of \S~\ref{alggeo:225}.
\begin{table}
@@ 110,9 +134,8 @@ holds.
\ec
\end{figure}
A similar argument validates multiplicative associativity, except that
here we compute the \emph{volume} of a three-dimensional rectangular
box, which box we turn various ways.%
\footnote{\cite[Ch.~1]{Spiegel}}
+here one computes the \emph{volume} of a three-dimensional rectangular
+box, which box one turns various ways.
\index{inversion!multiplicative}
\index{multiplicative inversion}
@@ 123,7 +146,14 @@ $a=0$. Loosely,
\frac{1}{0}=\infty.
\]
But since $3/0 = \infty$ also, surely either the zero or the infinity,
or both, somehow differ in the latter case.
+or both, somehow differ in the latter case.%
+\footnote{%
+ Weierstrass, Kronecker, Dedekind and Frege, among others, spent much
+ of the nineteenth century intensely debating the implications of this
+ very question. The applied book you are reading however will treat
+ the matter in a more relaxed manner than did these mathematical titans
+ of yesteryear.
+}
\index{C and C++}
Looking ahead in the book, we note that the multiplicative properties do
@@ 132,8 +162,8 @@ matrix multiplication is not commutative
is not associative. Where associativity does not hold and parentheses
do not otherwise group, right-to-left association is notationally
implicit:%
\footnote{
 The fine~C and~C++ programming languages are unfortunately stuck with
+\footnote{%
+ The fine~C and~C++ programming languages unfortunately are stuck with
the reverse order of association, along with division inharmoniously
on the same level of syntactic precedence as multiplication. Standard
mathematical notation is more elegant:
@@ 141,7 +171,7 @@ implicit:%
abc/uvw = \frac{(a)(bc)}{(u)(vw)}.
\]
}$\mbox{}^,$%
\footnote{
+\footnote{%
The nonassociative \emph{cross product} $\ve B \times \ve C$ is
introduced in \S~\ref{vector:220.30}.
}
@@ 150,7 +180,7 @@ implicit:%
\]
The sense of it is that the thing on the left ($\ve A\times\mbox{}$) \emph{operates}
on the thing on the right ($\ve B \times \ve C$). (In the rare case
in which the question arises, you may want to use parentheses
+in which the question arises, you may wish to use parentheses
anyway.)
\subsection{Negative numbers}
@@ 164,10 +194,10 @@ Consider that
(-a)(+b) &=& -ab, \\
(-a)(-b) &=& +ab.
\eqb
The first three of the four equations are unsurprising, but the last is
interesting. Why would a negative count~$-a$ of a negative quantity~$-b$
come to a positive product~$+ab$? To see why, consider the
progression
+The first three of the four equations probably are unsurprising, but the
+last is interesting. Why would a negative count~$-a$ of a negative
+quantity~$-b$ come to a positive product~$+ab$? To see why, consider
+the progression
\settowidth\tla{$\ds+$}
\bqb
&\vdots& \\
@@ 188,7 +218,7 @@ be positive for this reason.
\index{inequality}
If%
\footnote{
+\footnote{%
Few readers attempting this book will need to be reminded that~$<$
means ``is less than,'' that~$>$ means ``is greater than,'' or
that~$\le$ and~$\ge$ respectively mean ``is less than or equal to''
@@ 207,9 +237,13 @@ However, the relationship between~$ua$ a
ua > ub && \mbox{if $u < 0$.}
\eqb
Also,
\[
 \frac{1}{a} > \frac{1}{b}.
\]
+\settowidth\tla{and}
+\bqb
+ \frac{1}{a} > \frac{1}{b} &&
+ \mbox{if $a > 0$ \makebox[\tla][c]{or} $b < 0$;} \\
+ \frac{1}{a} < \frac{1}{b} && \mbox{if $a < 0$ and $b > 0$.}
+\eqb
+See further \S~\ref{alggeo:224power}.
\subsection{The change of variable}
\label{alggeo:222.40}
@@ 220,28 +254,27 @@ Also,
\index{assignment}
\index{$\leftarrow$}
\index{C and C++}
The applied mathematician very often finds it convenient
\emph{to change variables,} introducing new symbols to stand in place
of old. For this we have the \emph{change of variable} or
\emph{assignment} notation%
\footnote{
 There appears to exist no broadly established standard mathematical
+The mathematician often finds it convenient \emph{to change variables,}
+introducing new symbols to stand in place of old. For this we have the
+\emph{change of variable} or \emph{assignment} notation%
+\footnote{%
+ There appears to exist no broadly established standard applied mathematical
notation for the change of variable, other than the~$=$ equal sign,
 which regrettably does not fill the role well. One can indeed use the
 equal sign, but then what does the change of variable $k=k+1$ mean?
 It looks like a claim that~$k$ and $k+1$ are the same,
 which is impossible. The notation $k\la k+1$ by contrast is
 unambiguous; it means to increment~$k$ by one. However, the latter
 notation admittedly has seen only scattered use in the literature.
+ which regrettably does not always fill the role well. One can indeed
+ use the equal sign, but then what does the change of variable $k=k+1$
+ mean? It looks like an impossible assertion that~$k$ and $k+1$ were
+ the same. The notation $k\la k+1$ by contrast is unambiguous,
+ incrementing~$k$ by one. Nevertheless, admittedly, the latter
+ notation has seen only scattered use in the literature.
The~C and~C++ programming languages use~\texttt{==} for equality
and~\texttt{=} for assignment (change of variable), as the reader may be
aware.
}
\[
 Q \la P.
+ Q \la P,
\]
This means, ``in place of~$P$, put~$Q$''; or, ``let~$Q$ now equal~$P$.''
+which means, ``in place of~$P$, put~$Q$''; or, ``let~$Q$ now equal~$P$.''
For example, if $a^2 + b^2 = c^2$, then the change of variable $2\mu \la a$
yields the new form $(2\mu)^2 + b^2 = c^2$.
@@ 253,8 +286,8 @@ Similar to the change of variable notati
Q \equiv P.
\]
This means, ``let the new symbol~$Q$ represent~$P$.''%
\footnote{
 One would never write $k\equiv k+1$. Even $k\la k+1$ can confuse
+\footnote{%
+ One would never write, $k\equiv k+1$. Even $k\la k+1$ can confuse
readers inasmuch as it appears to imply two different values for the same
symbol~$k$, but the latter notation is sometimes used anyway when
new symbols are unwanted or because more precise alternatives (like
@@ 268,9 +301,9 @@ This means, ``let the new symbol~$Q$ rep
The two notations logically mean about the same thing. Subjectively, $Q
\equiv P$ identifies a quantity~$P$ sufficiently interesting to be given
a permanent name~$Q$, whereas $Q\la P$ implies nothing especially
interesting about~$P$ or~$Q$; it just introduces a (perhaps temporary)
new symbol~$Q$ to ease the algebra. The concepts grow clearer as
+a permanent name~$Q$; whereas $Q\la P$ implies nothing especially
+interesting about~$P$ or~$Q$, but just introduces a (perhaps temporary)
+new symbol~$Q$ to ease the algebra. These concepts grow clearer as
examples of the usage arise in the book.
% 
@@ 290,7 +323,7 @@ examples of the usage arise in the book.
\index{order}
Differences and sums of squares are conveniently factored as
\bq{alggeo:224:30}
 \index{squares, sum or difference of}
+ \index{square!sum or difference of}
\begin{split}
a^2-b^2 &= (a+b)(a-b), \\
a^2+b^2 &= (a+ib)(a-ib), \\
@@ 299,10 +332,10 @@ Differences and sums of squares are conv
\end{split}
\eq
(where~$i$ is the \emph{imaginary unit,} a number defined such that $i^2
= -1$, introduced in more detail in \S~\ref{alggeo:225} below). Useful
as these four forms are, however, none of them can directly factor the
more general quadratic%
\footnote{
+= -1$, introduced in \S~\ref{alggeo:225} below). Useful as these four
+forms are, however, none of them can directly factor the more general
+quadratic%
+\footnote{%
The adjective \emph{quadratic} refers to the algebra of expressions in
which no term has greater than second order. Examples of quadratic
expressions include~$x^2$, $2x^2-7x+3$ and $x^2+2xy+y^2$. By
expressions include~$x^2$, $2x^27x+3$ and $x^2+2xy+y^2$. By
@@ 315,11 +348,15 @@ more general quadratic%
refers to the number of variables multiplied together in a term. The
term $5x^2y=5[x][x][y]$ is of third order, for instance.)
}
expression
+expression%
+\footnote{%
+ The~$\beta$ and~$\gamma$ are Greek letters, the full roster of which
+ you can find in appendix~\ref{greek}\@.
+}
\[
z^2 - 2\beta z + \gamma^2.
\]
To factor this, we \emph{complete the square,} writing
+To factor this, we \emph{complete the square,} writing,
\bqb
\index{square, completing the}
\index{completing the square}
@@ 329,17 +366,13 @@ To factor this, we \emph{complete the sq
&=& z^2 - 2\beta z + \beta^2 - (\beta^2 - \gamma^2) \xn\\
&=& (z-\beta)^2 - (\beta^2 - \gamma^2).
\eqb
The expression evidently has roots%
\footnote{
 A \emph{root} of $f(z)$ is a value of~$z$ for which $f(z)=0$. See
 \S~\ref{alggeo:250}.
}
where
+The expression evidently has \emph{roots}that is, it has values
+of~$z$ that null the expressionwhere
\[
(z-\beta)^2 = -(\beta^2 - \gamma^2),
\]
or in other words where%
\footnote{
+\footnote{%
The symbol~$\pm$ means~``$+$ or~$-$.'' In conjunction with this
symbol, the alternate symbol~$\mp$ occasionally also appears,
meaning~``$-$ or~$+$''---which is the same thing except that, where
@@ 350,20 +383,20 @@ or in other words where%
\index{root extraction!from a quadratic polynomial}
z = \beta \pm \sqrt{\beta^2 - \gamma^2}.
\eq
This suggests the factoring%
\footnote{
 It suggests it because the expressions on the left and right sides
 of~(\ref{alggeo:240:50}) are both quadratic (the highest power is~$z^2$)
 and have the same roots. Substituting into the equation the
 values of~$z_1$ and~$z_2$ and simplifying proves the suggestion
 correct.
}
+This suggests the factoring that
\bq{alggeo:240:50}
z^2 - 2\beta z + \gamma^2
= (z-z_1)(z-z_2),
\eq
where~$z_1$ and~$z_2$ are the two values of~$z$ given
by~(\ref{alggeo:240:quad}).
+by~(\ref{alggeo:240:quad}).%
+\footnote{%
+ It suggests it because the expressions on the left and right sides
+ of~(\ref{alggeo:240:50}) are each quadratic and because the two
+ expressions appear to share the same roots.
+}
+Substituting into the equation the values of~$z_1$ and~$z_2$ and
+simplifying proves the suggestion correct.
It follows that the two solutions of the quadratic equation
\bq{alggeo:240:quadeq}
@@ 371,7 +404,7 @@ It follows that the two solutions of the
\eq
are those given by~(\ref{alggeo:240:quad}), which is called \emph{the
quadratic formula.}%
\footnote{
+\footnote{%
The form of the quadratic formula which usually appears in print
is
\[
@@ 391,11 +424,10 @@ quadratic formula.}%
} = 1\ \mbox{or}\ 2.
\]
}
%
(\emph{Cubic} and \emph{quartic formulas} also exist respectively to
extract the roots of polynomials of third and fourth order, but they are
much harder. See Ch.~\ref{cubic} and its Tables~\ref{cubic:cubictable}
and~\ref{cubic:quartictable}.)
+(\emph{Cubic} and \emph{quartic formulas} also exist to extract the
+roots of polynomials respectively of third and fourth order, but they
+are much harder. See chapter~\ref{cubic} and its
+Tables~\ref{cubic:cubictable} and~\ref{cubic:quartictable}.)
% 
@@ 418,20 +450,21 @@ and~\ref{cubic:quartictable}.)
\index{index!of multiplication}
\index{multiplication!index of}
\index{loop counter}
Sums and products of series arise so frequently in
mathematical work that one finds it convenient to define terse notations
to express them. The summation notation
+Sums and products of series arise so often in mathematical work that one
+finds it convenient to define terse notations to express them. The
+summation notation
\[
\sum_{k=a}^{b} f(k)
\]
means to let~$k$ equal each of the integers $a,a+1,a+2,\ldots,b$ in
turn, evaluating the function $f(k)$ at each~$k$, then adding the
+turn, evaluating the function $f(k)$ at each~$k$ and then adding up the
several $f(k)$. For example,%
\footnote{
 The hexadecimal numeral~$\mbox{0x56}$ represents the same number the
 decimal numeral~$86$ represents. The book's preface explains why the
 book represents such numbers in hexadecimal. Appendix~\ref{hex} tells
 how to read the numerals.
+\footnote{%
+ What's that~$\mbox{0x56}$? Answer: it is a \emph{hexadecimal
+ numeral} that represents the same number the familiar, decimal numeral~$86$
+ represents. It is an eighty-six. The book's preface explains why the
+ book gives such numbers in hexadecimal. Appendix~\ref{hex} tells how
+ to read the numerals, if you do not already know.
}
\[
\sum_{k=3}^{6} k^2 = 3^2 + 4^2 + 5^2 + 6^2 = \mr{0x56}.
@@ 441,19 +474,19 @@ The similar multiplication notation
\prod_{j=a}^{b} f(j)
\]
means \emph{to multiply} the several $f(j)$ rather than to add them.
The symbols~$\sum$ and~$\prod$ come respectively from the Greek letters
for~S and~P, and may be regarded as standing for ``Sum'' and
+The symbols~$\sum$ and~$\prod$ are respectively the Greek letters for~S
+and~P, writ large, and may be regarded as standing for ``Sum'' and
``Product.'' The~$j$ or~$k$ is a \emph{dummy variable, index of
summation} or \emph{loop counter}---a variable with no
independent existence, used only to facilitate the addition or
multiplication of the series.%
\footnote{
+summation} or \emph{loop counter}---a variable with no independent
+existence, used only to facilitate the addition or multiplication of the
+series.%
+\footnote{%
Section~\ref{integ:240} speaks further of the dummy variable.
}
(Nothing prevents one from writing~$\prod_k$ rather than~$\prod_j$,
incidentally. For a dummy variable, one can use any letter one likes.
However, the general habit of writing~$\sum_k$ and~$\prod_j$ proves
convenient at least in \S~\ref{drvtv:250.30} and Ch.~\ref{taylor}, so we
+convenient at least in \S~\ref{drvtv:250.30} and chapter~\ref{taylor}, so we
start now.)
\index{\char 33}
@@ 465,13 +498,13 @@ The product shorthand
\eqb
is very frequently used. The notation~$n!$ is pronounced ``$n$
factorial.'' Regarding the notation $n!/m!$, this can of course be
regarded correctly as~$n!$ divided by~$m!$ , but it usually proves more
+regarded correctly as~$n!$ divided by~$m!$, but it usually proves more
amenable to regard the notation as a single unit.%
\footnote{
+\footnote{%
One reason among others for this is that factorials rapidly multiply
to extremely large sizes, overflowing computer registers during
numerical computation. If you can avoid unnecessary multiplication by
 regarding $n!/m!$ as a single unit, this is a win.
+ regarding $n!/m!$ as a single unit, it helps.
}
\index{series!multiplication order of}
@@ 483,10 +516,10 @@ is not always commutative, we specify th
\cdots [f(a+2)] [f(a+1)] [f(a)]
\]
rather than the reverse order of multiplication.%
\footnote{
 The extant mathematical literature lacks an established standard on
 the order of multiplication implied by the ``$\prod$'' symbol, but
 this is the order we will use in this book.
+\footnote{%
+ The extant mathematical literature seems to lack an established
+ standard on the order of multiplication implied by the ``$\prod$''
+ symbol, but this is the order we will use in this book.
}
Multiplication proceeds from right to left. In the event that the
reverse order of multiplication is needed, we will use the notation
@@ 510,6 +543,8 @@ This means among other things that
\index{belonging}
\index{membership}
\index{set}
+\index{set notation}
+\index{$\in$}
\index{$\in \mathbb Z$}
\index{Fortran}
Context tends to make the notation
@@ 519,7 +554,7 @@ Context tends to make the notation
unnecessary, but if used (as here and in \S~\ref{alggeo:224}) it states
explicitly that~$N$, $j$ and~$k$ are integers. (The symbol~$\mathbb Z$
represents%
\footnote{
+\footnote{%
The letter~$\mathbb Z$ recalls the transitive and intransitive German
verb \emph{z\"ahlen,} ``to count.''
}
@@ 527,23 +562,23 @@ the set of all integers: $\mathbb Z \equ
\{\ldots,-5,-4,-3,-2,-1,0,1,2,3,4,5,\ldots\}$. The symbol~$\in$ means
``belongs to'' or ``is a member of.'' Integers conventionally get
the letters%
\footnote{
+\footnote{%
Though Fortran is perhaps less widely used a computer programming
language than it once was, it dominated appliedmathematical computer
programming for decades, during which the standard way to declare an
 integer variable to the Fortran compiler was simply to let its name
+ integral variable to the Fortran compiler was simply to let its name
begin with~\texttt{I}, \texttt{J}, \texttt{K}, \texttt{L}, \texttt{M}
or~\texttt{N}; so, this alphabetical convention is fairly
well cemented in practice.
}%
~$i$, $j$, $k$, $m$, $n$, $M$ and~$N$ when availablethough~$i$
is sometimes avoided because the same letter represents the
+sometimes is avoided because the same letter represents the
imaginary unit of \S~\ref{alggeo:225}. Where additional letters are
needed~$\ell$, $p$ and~$q$, plus the capitals of these and the earlier
listed letters, can be pressed into service, occasionally joined
even by~$r$ and~$s$. Greek letters are avoided, as---ironically in light
of the symbol~$\mathbb Z$---are the Roman letters~$x$, $y$ and~$z$. Refer
to Appendix~\ref{greek}.)
+to appendix~\ref{greek}\@.)
On first encounter, the~$\sum$ and~$\prod$ notation seems a bit
overwrought, whether or not the $\in\mathbb Z$ notation also is used.
@@ 561,7 +596,11 @@ notation extensively in this book.
\index{series!arithmetic}
A simple yet useful application of the series sum of \S~\ref{alggeo:227}
is the \emph{arithmetic series}
+is the \emph{arithmetic%
+\footnote{%
+ As an adjective, the word is pronounced ``arithMETic.''
+}
+series}
\[
\sum_{k=a}^{b} k = a + (a+1) + (a+2) + \cdots + b.
\]
@@ 587,7 +626,7 @@ Section~\ref{alggeo:228.30} addresses th
This necessarily tedious section discusses powers and roots.
It offers no surprises. Table~\ref{alggeo:224:t1} summarizes its
definitions and results. Readers seeking more rewarding reading may
prefer just to glance at the table then to skip directly to the start of
+prefer just to glance at the table and then to skip directly to the start of
the next section.
\begin{table}
\caption{Power properties and definitions.}
@@ 614,6 +653,13 @@ In this section, the exponents
\]
are integers,
but the exponents~$a$ and~$b$ are arbitrary real numbers.
+(What is a \emph{real number?} Section~\ref{alggeo:225} will explain;
+but, meanwhile, you can think of a real number as just a number,
+like~4, $5/2$ or~$\sqrt{3}$. There are also \emph{complex numbers}
+like $7+i4$, for which this section's results---and, indeed, most of the
+chapter's and book's results---turn out to be equally valid; but except
+in eqn.~\ref{alggeo:224:30} we have not met this~$i$ yet, so you need not
+worry about it for now.)
\subsection{Notation and integral powers}
\label{alggeo:224int}
@@ 627,11 +673,11 @@ The power notation
\]
indicates the number~$z$, multiplied by itself~$n$ times. More
formally, when the \emph{exponent}~$n$ is a nonnegative integer,%
\footnote{
+\footnote{%
% Strictly, the comma here should go inside the quotes, but in this
% context that might confuse the reader, so we bend the style rule
% here.
 The symbol~``$\equiv$'' means~``$=$'', but it further usually
+ The symbol~``$\equiv$'' means~``$=$'', but further usually
indicates that the expression on its right serves to define the
expression on its left. Refer to \S~\ref{alggeo:222.40}.
}
@@ 639,7 +685,7 @@ formally, when the \emph{exponent}~$n$ i
z^n \equiv \prod_{j=1}^{n}z.
\eq
For example,%
\footnote{
+\footnote{\label{alggeo:224int:fn1}%
The case~$0^0$ is interesting because it lacks an obvious
interpretation. The specific interpretation depends on the nature and
meaning of the two zeros. For interest, if $E \equiv 1/\ep$, then
@@ 677,7 +723,7 @@ For similar reasons,
\bq{alggeo:224:2}
z^{mn}=(z^m)^n=(z^n)^m.
\eq
On the other hand from multiplicative associativity and commutivity,
+On the other hand, from multiplicative associativity and commutivity,
\bq{alggeo:224:2a}
(uv)^n = u^n v^n.
\eq
@@ 711,13 +757,14 @@ for any~$z$ and integral~$n$.
\index{square root}
The number $z^{1/n}$ is called the \emph{$n$th root} of~$z$---or in the
very common case $n=2$, the \emph{square root} of~$z$, often written
+very common case that $n=2$, the \emph{square root} of~$z$, often
+written as
\[
\sqrt{z}.
\]
When~$z$ is real and nonnegative, the last notation is usually
implicitly taken to mean the real, nonnegative square root.
In any case, the power and root operations mutually invert one another.
+When~$z$ is real and nonnegative, the last notation usually implicitly
+is taken to mean the real, nonnegative square root. In any case, the
+power and root operations mutually invert one another.
\index{power!real}
\index{real number!approximation of as a ratio of integers}
@@ 735,17 +782,17 @@ Taking the $q$th root,
\[
w^p=(z^p)^{1/q}.
\]
But $w=z^{1/q}$, so this is
+But $w=z^{1/q}$, so this has that
\[
(z^{1/q})^p=(z^p)^{1/q},
\]
which says that it does not matter whether one applies the power or the
root first; the result is the same. Extending~(\ref{alggeo:224:2})
+which is to say that it does not matter whether one applies the power or
+the root first: the result is the same. Extending~(\ref{alggeo:224:2})
therefore, we define $z^{p/q}$ such that
\bq{alggeo:224:12}
(z^{1/q})^p=z^{p/q}=(z^p)^{1/q}.
\eq
Since any real number can be approximated arbitrarily closely by a ratio
+Since one can arbitrarily closely approximate any real number by a ratio
of integers,~(\ref{alggeo:224:12}) implies a power definition for all
real exponents.
@@ 759,11 +806,11 @@ The proof is straightforward. If
\[
w \equiv z^{1/qs},
\]
then raising to the~$qs$ power yields
+then raising to the~$qs$ power yields that
\[
(w^s)^q = z.
\]
Successively taking the $q$th and $s$th roots gives
+Successively taking the $q$th and $s$th roots gives that
\[
w = (z^{1/q})^{1/s}.
\]
@@ 792,7 +839,9 @@ to the $1/q$ power, we have that
&=& \left[(u^{p/q}) (v^{p/q})\right]^{q/q} \\
&=& u^{p/q} v^{p/q}.
\eqb
In other words
+But as argued already in \S~\ref{alggeo:224roots}, some ratio $p/q$ of
+integers exists to approach any real number~$a$ with arbitrary
+precision, so the last means that
\bq{alggeo:224:4}
(uv)^a = u^a v^a
\eq
@@ 813,16 +862,15 @@ By identical reasoning,
\[
z^{(p/q)(r/s)} = (z^{r/s})^{p/q}.
\]
Since $p/q$ and $r/s$ can approximate any real numbers with
arbitrary precision, this implies that
+Again as before, $p/q$ and $r/s$ approximate real numbers, so
\bq{alggeo:224:4a}
(z^a)^b=z^{ab}=(z^b)^a
\eq
for any real~$a$ and~$b$.
\subsection{Sums of powers}
+\subsection{Sums of exponents}
\label{alggeo:224sdp}
\index{power!sum of}
+\index{exponent!sum of}
With~(\ref{alggeo:224:1}), (\ref{alggeo:224:4})
and~(\ref{alggeo:224:4a}), one can reason that
@@ 846,7 +894,7 @@ But then replacing $b \la b$ in~(\ref{a
\[
 z^{a-b} = z^az^{-b},
\]
which according to~(\ref{alggeo:224:7}) is
+which according to~(\ref{alggeo:224:7}) is that
\bq{alggeo:224:8}
 z^{a-b} = \frac{z^a}{z^b}.
\eq
@@ 858,15 +906,46 @@ Table~\ref{alggeo:224:t1} on page~\pager
the section's definitions and results.
Looking ahead to \S~\ref{alggeo:225}, \S~\ref{trig:280} and
Ch.~\ref{cexp}, we observe that nothing in the foregoing analysis
+chapter~\ref{cexp}, we observe that nothing in the foregoing analysis
requires the base variables~$z$, $w$, $u$ and~$v$ to be real numbers; if
complex (\S~\ref{alggeo:225}), the formulas remain valid. Still, the
analysis does imply that the various exponents~$m$, $n$, $p/q$, $a$, $b$
and so on are real numbers. This restriction, we shall remove later,
+and so on are real numbers. We shall remove this restriction later,
purposely defining the action of a complex exponent to comport with the
results found here. With such a definition the results apply not only
for all bases but also for all exponents, real or complex.
+\subsection{Powerrelated inequality}
+\label{alggeo:224power}
+\index{inequality!powerrelated}
+\index{powerrelated inequality}
+
+If
+\[
+ 0 < x < y
+\]
+are real numbers (for this subsection alone of the section does not
+apply to the complex numbers of \S~\ref{alggeo:225}), then
+inductively---since $0 < (x)(x) < (y)(y)$, $0 < (x)(x^2) < (y)(y^2)$,
+and so on---we have that $0 < x^p < y^p$ for positive, real,
+integral~$p$. Moreover, the implication is reversible, so $0 < x^{1/q}
+< y^{1/q}$, too. Combining these with $a = p/q$ and recalling
+\S~\ref{alggeo:222.6},
+\bqb
+ 0 < x^a < y^a && \mbox{if $a > 0$,} \\
+ 0 < x^a = y^a && \mbox{if $a = 0$,} \\
+ 0 < y^a < x^a && \mbox{if $a < 0$.}
+\eqb
+Similar reasoning has further that%
+\settowidth\tlb{$0 < x < 1$}%
+\bqb
+ 1 < x < x^a && \mbox{if \makebox[\tlb][l]{$x > 1$} and $a > 1$,} \\
+ 1 < x^a < x && \mbox{if \makebox[\tlb][l]{$x > 1$} and $0 < a < 1$,} \\
+ 0 < x^a < x < 1 && \mbox{if \makebox[\tlb][l]{$0 < x < 1$} and $a > 1$,} \\
+ 0 < x < x^a < 1 && \mbox{if \makebox[\tlb][l]{$0 < x < 1$} and $0 < a < 1$,}
+\eqb
+among others.
+
% 
\section{Multiplying and dividing power series}
@@ 878,14 +957,15 @@ for all bases but also for all exponents
\index{power series!with negative powers}
A \emph{power series}%
\footnote{
+\footnote{\label{alggeo:228fn1}%
Another name for the \emph{power series} is \emph{polynomial.}
The word ``polynomial'' usually connotes a power series with a finite
number of terms, but the two names in fact refer to essentially the
same thing.
Professional mathematicians use the terms more precisely.
 Equation~(\ref{alggeo:228:05}), they call a ``power series'' only if
+ Equation~(\ref{alggeo:228:05}), they callor at any rate some of
+ them calla ``power series'' only if
$a_k=0$ for all $k<0$in other words, technically, not if it
includes negative powers of~$z$. They call it a ``polynomial'' only
if it is a ``power series'' with a finite number of terms. They
@@ 903,23 +983,25 @@ A \emph{power series}%
This book follows the last usage. You however can
call~(\ref{alggeo:228:05}) a \emph{Laurent series} if you prefer (and
if you pronounce it right: ``lorON''). That is after all exactly
 what it is. Nevertheless if you do use the name ``Laurent series,''
 be prepared for people subjectivelyfor no particular reasonto
 expect you to establish complex radii of convergence, to sketch some
 annulus in the Argand plane, and/or to engage in other maybe
 unnecessary formalities. If that is not what you seek, then you may
 find it better just to call the thing by the less lofty name of
+ what it is. Nevertheless, if you do use the name ``Laurent series,''
+ be prepared for some people subjectivelyfor no particular
+ reasonto expect you to establish complex radii of convergence, to
+ sketch some annulus in the Argand plane, and/or to engage in other
+ maybe unnecessary formalities. If that is not what you seek, then you
+ may find it better just to call the thing by the less lofty name of
``power series''or better, if it has a finite number of terms, by
the even humbler name of ``polynomial.''
Semantics.
 % bad break (To remove the following \pagebreak indirectly causes a
+ % bad, bad break (To remove the following \pagebreak indirectly causes a
% page with almost nothing on it to appear. This however is subject
% to the content of the entire chapter up to and about this point.
% It is a temperamental bad break.)
\pagebreak
 All these names mean about the same thing, but one is
 expected most carefully always to give the right name in the right
+ All these names mean about the same thing, but one is expected
+ most
+ carefully
+ always to give the right name in the right
place. What a bother! (Someone once told the writer that the
Japanese language can give different names to the same object,
depending on whether the \emph{speaker} is male or female. The
@@ 935,14 +1017,14 @@ A \emph{power series}%
means quite specifically
a power series without negative powers and tends to connote a
representation of some particular function of interestas we shall
 see in Ch.~\ref{taylor}.
+ see in chapter~\ref{taylor}.%
}
is a weighted sum of integral powers:
\bq{alggeo:228:05}
A(z) = \sum_{k=\infty}^{\infty} a_kz^k,
\eq
where the several~$a_k$ are arbitrary constants. This section discusses
the multiplication and division of power series.
+in which the several weights~$a_k$ are arbitrary constants. This
+section discusses the multiplication and division of power series.
\subsection{Multiplying power series}
\label{alggeo:228.10}
@@ 958,10 +1040,11 @@ Given two power series
the product of the two series is evidently
\settoheight\tla{\scriptsize$k=-\infty\ j=-\infty$}
\bq{alggeo:228:prod}
 P(z) \equiv A(z)B(z) = \sum_{k=-\infty\rule{0em}{\tla}}^{\infty}
 \,\sum_{j=-\infty\rule{0em}{\tla}}^{\infty}
 a_jb_{k-j}
 z^k.
+ P(z) \equiv A(z)B(z) =
+ \sum_{k=-\infty\rule{0em}{\tla}}^{\infty}
+ \left[
+ \left(\sum_{j=-\infty\rule{0em}{\tla}}^{\infty} a_jb_{k-j}\right)
+ z^k \right].
\eq
\subsection{Dividing power series}
@@ 985,7 +1068,7 @@ but this subsection does it by long divi
&=& 2z + \frac{z-2}{z-2} + \frac{5}{z-2} = 2z + 1 + \frac{5}{z-2}.
\eqb
The strategy is to take the dividend%
\footnote{
+\footnote{%
If $Q(z)$ is a \emph{quotient} and $R(z)$ a \emph{remainder,} then
$B(z)$ is a \emph{dividend} (or \emph{numerator}) and $A(z)$ a
\emph{divisor} (or \emph{denominator}). Such are the
@@ 995,13 +1078,15 @@ $B(z)$ piece by piece, purposely choosin
$A(z)$.
If you feel that you understand the example, then that is really all
there is to it, and you can skip the rest of the subsection if you like.
One sometimes wants to express the long division of power series more
formally, however. That is what the rest of the subsection is about.
(Be advised however that the cleverer technique of
\S~\ref{alggeo:228.50}, though less direct, is often easier and faster.)
+there is to it, and you can skip over several pages of thick notation
+straight to \S~\ref{alggeo:228.30} if you like. Indeed, to skip is
+recommended to many or most readersthough, if you do skip, you might
+nonetheless glance along the way at Tables~\ref{alggeo:228:tbldown}
+and~\ref{alggeo:228:tblup}, which summarize and formalize the procedure
+the example has used and which also include the clever, alternate
+procedure of \S~\ref{alggeo:228.50}.
Formally, we prepare the long division $B(z)/A(z)$ by writing
+Formally, we prepare the long division $B(z)/A(z)$ by writing,
\bq{alggeo:228:20}
B(z) = A(z)Q_n(z) + R_n(z),
\eq
@@ 1033,7 +1118,7 @@ away to nothing, to make it disappear as
As in the example, we pursue the goal by choosing from $R_n(z)$ an
easily divisible piece containing the whole highorder term of~$R_n(z)$.
The piece we choose is $(r_{nn}/a_K)z^{n-K}A(z)$, which we add and
subtract from~(\ref{alggeo:228:20}) to obtain
+subtract from~(\ref{alggeo:228:20}) to obtain the form
\[
 B(z) = A(z)\left[Q_n(z)+\frac{r_{nn}}{a_K}z^{n-K}\right] +
 \left[ R_n(z) - \frac{r_{nn}}{a_K}z^{n-K}A(z) \right].
@@ 1059,10 +1144,10 @@ To begin the actual long division, we in
\[
R_N(z) = B(z),
\]
for which~(\ref{alggeo:228:20}) is trivially true. Then we iterate
per~(\ref{alggeo:228:25}) as many times as desired. If an infinite
number of times, then so long as $R_n(z)$ tends to vanish as $n \ra
\infty$, it follows from~(\ref{alggeo:228:20}) that
+for which~(\ref{alggeo:228:20}) is trivially true if
+$Q_N(z)=0$. Then we iterate per~(\ref{alggeo:228:25}) as many times as
+desired. If an infinite number of times, then so long as $R_n(z)$ tends
+to vanish as $n \ra \infty$, it follows from~(\ref{alggeo:228:20}) that
\bq{alggeo:228:30}
\frac{B(z)}{A(z)} = Q_{\infty}(z).
\eq
@@ 1098,9 +1183,12 @@ procedure.%
In its $q_{nK}$ equation, the table includes also the result of
\S~\ref{alggeo:228.50} below.
It should be observed in light of Table~\ref{alggeo:228:tbldown} that
+\index{residual order}
+\index{order!residual}
+The foregoing algebra is probably punishing enough; but if not, then
+one can further observe in light of Table~\ref{alggeo:228:tbldown} that
if%
\footnote{
+\footnote{%
The notations~$K_o$, $a_k$ and~$z^k$ are usually pronounced,
respectively, as ``$K$ naught,'' ``$a$ sub $k$'' and ``$z$ to
the~$k$'' (or, more fully, ``$z$ to the $k$th power'')at least in
@@ 1115,11 +1203,11 @@ then
 R_n(z) = \sum_{k=n-(K-K_o)+1}^n r_{nk} z^k
 \ \ \mbox{for all $n < N_o+(K-K_o)$.}
\eq
That is, the remainder has order one less than the divisor has. The
reason for this, of course, is that we have strategically planned the
longdivision iteration precisely to cause the leading term of the
divisor to cancel the leading term of the remainder at each step.%
\footnote{
+That is, the remainder has residual order one less than the divisor has.
+The reason for this, of course, is that we have strategically planned
+the longdivision iteration precisely to cause the divisor's leading
+term to cancel the remainder's leading term at each step.%
+\footnote{%
If a more formal demonstration of~(\ref{alggeo:228:37}) is wanted,
then consider per~(\ref{alggeo:228:25}) that
\[
@@ 1128,7 +1216,7 @@ divisor to cancel the leading term of th
If the leastorder term of $R_m(z)$ is a~$z^{N_o}$ term (as clearly is
the case at least for the initial remainder $R_N[z] = B[z]$), then
according to the equation so also must the leastorder term of
 $R_{m1}(z)$ be a~$z^{N_o}$ term, unless an even lowerorder term be
+ $R_{m1}(z)$ be a~$z^{N_o}$ term, unless an even lowerorder term is
 contributed by the product $z^{m-K}A(z)$. But that very product's
 term of least order is a $z^{m-(K-K_o)}$ term. Under these
conditions, evidently the leastorder term of $R_{m1}(z)$ is a
@@ 1142,13 +1230,43 @@ divisor to cancel the leading term of th
from $z^{n(KK_o)+1}$ through~$z^n$, which is exactly the
claim~(\ref{alggeo:228:37}) makes.
}
+(If not clear from the context, a polynomial's \emph{residual order} is
+the difference between the least and greatest orders of its several
+terms. For example, the residual order of $9x^57x^4+6x^3$ is two
+because $53=2$or, if you prefer, because
+$9x^57x^4+6x^3=[x^3][9x^27x+6]$, where $9x^27x+6$ is of second
+order.\footnote{%
+ But what of $0x^57x^4+6x^3$ with its leading null coefficient? Is
+ \emph{this} polynomial's residual order also two?
+
+ Answer: that depends on what you mean. The strictly semantic
+ question of what a mere phrase ought to signify is not always very
+ interesting. After all, an infinite number of practically irrelevant
+ semantic distinctions \emph{could} be drawn. The applied
+ mathematician lacks the time.
+
+ % This is a somewhat interesting paragraph. Whether it belongs in the
+ % book at all remains to be seen, but it does not belong here.
+ %Remember: in the late nineteenth and early twentieth centuries, the
+ %mathematics profession made an heroic attempt to establish a complete
+ %body of such definitions. Though honorable, that attempt failed
+ %(\S~\ref{intro:284}). This does not mean that you and I should now
+ %turn our backs on all mathematical rigor, but it does suggest that we
+ %might be wise to approach some of the more abstract demands of
+ %\emph{professional} mathematical rigor with a certain measure of
+ %applied practicality.
+
+ Anyway, whatever semantics might eventually be settled upon, at
+ least~(\ref{alggeo:228:37}) and Table~\ref{alggeo:228:tbldown} remain
+ unambiguous.%
+})
The longdivision procedure of Table~\ref{alggeo:228:tbldown} extends
the quotient $Q_n(z)$ through successively smaller powers of~$z$. Often,
however, one prefers to extend the quotient through successively
\emph{larger} powers of~$z$, where a~$z^K$ term is $A(z)$'s term of
\emph{least} order. In this case, the long division goes by the
complementary rules of
+least rather than greatest order. In this case, the long division goes
+by the complementary rules of
Table~\ref{alggeo:228:tblup}.
\begin{table}
\caption{Dividing power series through successively larger powers.}
@@ 1196,19 +1314,19 @@ are known and
\[
 Q_\infty(z) = \sum_{k=N-K}^\infty q_kz^k
\]
is to be calculated, then one can multiply~(\ref{alggeo:228:60}) through
by $A(z)$ to obtain the form
+is to be calculated, then one can rearrange~(\ref{alggeo:228:60}) as
+that
\[
A(z)Q_\infty(z) = B(z).
\]
Expanding the left side according to~(\ref{alggeo:228:prod}) and
changing the index $n \la k$ on the right side,
+Expanding the rearranged equation's left side according
+to~(\ref{alggeo:228:prod}) and changing indices suitably on both sides,
\settoheight\tla{\scriptsize $k$}
\[
 \sum_{n=N\rule{0em}{\tla}}^{\infty}
 \,\sum_{k=N-K\rule{0em}{\tla}}^{n-K}
 a_{n-k} q_k
+ \sum_{n=N\rule{0em}{\tla}}^{\infty} \left[
+ \left( \sum_{k=N-K\rule{0em}{\tla}}^{n-K} a_{n-k} q_k \right)
 z^n
+ \right]
 = \sum_{n=N}^\infty b_nz^n.
\]
But for this to hold for all~$z$, the coefficients must match for
@@ 1228,19 +1346,19 @@ dividing by~$a_K$, we have that
\ \ n \ge N.
\eq
Equation~(\ref{alggeo:228:65}) computes the coefficients of $Q(z)$, each
coefficient depending on the coefficients earlier computed.
+coefficient depending not on any remainder but directly on the
+coefficients earlier computed.
The coefficientmatching technique of this subsection is easily adapted
to the division of series in decreasing, rather than increasing, powers
of~$z$ if needed or desired. The adaptation is left as an exercise to
the interested reader, but Tables~\ref{alggeo:228:tbldown}
and~\ref{alggeo:228:tblup} incorporate the technique both ways.
+of~$z$. Tables~\ref{alggeo:228:tbldown} and~\ref{alggeo:228:tblup}
+incorporate the technique both ways.
Admittedly, the fact that~(\ref{alggeo:228:65}) yields a sequence of
coefficients does not necessarily mean that the resulting power series
$Q_\infty(z)$ converges to some definite value over a given domain.
Consider for instance~(\ref{alggeo:228:45}), which diverges when%
\footnote{
+\footnote{%
See footnote~\ref{alggeo:228:fn1}.
}
$\left|z\right| > 1$, even though all its coefficients are known.
@@ 1250,12 +1368,12 @@ however, often what interest us are only
\[
 Q_n(z) = \sum_{k=N-K}^{n-K-1} q_kz^k.
\]
In this case,
+In this case, in light of~(\ref{alggeo:228:20}),
\bq{alggeo:228:68}
Q_\infty(z) = \frac{B(z)}{A(z)} = Q_n(z) + \frac{R_n(z)}{A(z)}
\eq
and convergence is not an issue. Solving~(\ref{alggeo:228:68}) for
$R_n(z)$,
+and convergence is not an issue. Solving~(\ref{alggeo:228:68})
+or~(\ref{alggeo:228:20}) for $R_n(z)$,
\bq{alggeo:228:70}
 R_n(z) = B(z) - A(z)Q_n(z).
\eq
@@ 1272,7 +1390,8 @@ matching of \S~\ref{alggeo:228.50}, and/
include%
\footnote{\label{alggeo:228:fn1}%
 The notation~$\left|z\right|$ represents the magnitude of~$z$. For
 example, $\left|5\right| = 5$, but also $\left|-5\right| = 5$.
+ example, $\left|5\right| = 5$ and $\left|8\right| = 8$, but also
+ $\left|-5\right| = 5$ and $\left|-8\right| = 8$.
}
\bq{alggeo:228:40}
\frac{1}{1\pm z} =
@@ 1292,19 +1411,19 @@ $1/(1z)$. However, there is a simpler,
instructive way to
demonstrate the same thing, as follows. Let
\[
 S \equiv \sum_{k=0}^\infty z^{k}, \ \ \left|z\right| < 1.
+ S_0 \equiv \sum_{k=0}^\infty z^{k}, \ \ \left|z\right| < 1.
\]
Multiplying by~$z$ yields
+Multiplying by~$z$ yields that
\[
 zS \equiv \sum_{k=1}^\infty z^{k}.
+ zS_0 = \sum_{k=1}^\infty z^{k}.
\]
Subtracting the latter equation from the former leaves
+Subtracting the latter equation from the former leaves that
\[
 (1-z)S = 1,
+ (1-z)S_0 = 1,
\]
which, after dividing by $1-z$, implies that
\bq{alggeo:228:45}
 S \equiv \sum_{k=0}^\infty z^{k} = \frac{1}{1-z}, \ \ \left|z\right| < 1,
+ S_0 \equiv \sum_{k=0}^\infty z^{k} = \frac{1}{1-z}, \ \ \left|z\right| < 1,
\eq
as was to be demonstrated.
@@ 1319,34 +1438,54 @@ as was to be demonstrated.
Besides being more aesthetic than the long division
of \S~\ref{alggeo:228.20}, the difference technique of
\S~\ref{alggeo:228.30} permits one to extend the basic geometric series
in several ways. For instance, the sum
+in several ways. For instance, one can compute the sum
\[
 S_1 \equiv \sum_{k=0}^\infty k z^{k}, \ \ \left|z\right| < 1
\]
(which arises in, among others, Planck's quantum blackbody radiation
calculation%
\footnote{\cite{McMahon}}%
), we can compute as follows. We multiply the unknown~$S_1$ by~$z$,
+) as follows. Multiply the unknown~$S_1$ by~$z$,
producing
\[
 zS_1 = \sum_{k=0}^\infty k z^{k+1} = \sum_{k=1}^\infty (k-1) z^k.
\]
We then subtract~$zS_1$ from~$S_1$, leaving
+Subtract~$zS_1$ from~$S_1$, leaving
\[
 (1-z)S_1 = \sum_{k=0}^\infty k z^{k} - \sum_{k=1}^\infty (k-1)z^k
 = \sum_{k=1}^\infty z^k = z \sum_{k=0}^\infty z^k = \frac{z}{1-z},
\]
where we have used~(\ref{alggeo:228:45}) to collapse the last sum.
Dividing by $1-z$, we arrive at
+Dividing by $1-z$,
\bq{alggeo:228:50}
 S_1 \equiv \sum_{k=0}^\infty k z^{k} = \frac{z}{(1-z)^2},
 \ \ \left|z\right| < 1,
\eq
which was to be found.
Further series of the kind, such as $\sum_k k^2 z^k$, $\sum_k (k+1)(k)
z^k$, $\sum_k k^3 z^k$, etc., can be calculated in like manner as the
need for them arises.
+Further series of the kind, such as $\sum_k k^2 z^k$, can be calculated
+in like manner as the need for them arises.
+Introducing the derivative, though, chapter~\ref{drvtv} does it better:%
+\footnote{%
+ It does not really matter, but you can regard~$k^n$ to be unitythat
+ is, $k^n=1$when $n=0$ and $k=0$, though $n=0$ technically lies
+ outside the domain of~(\ref{alggeo:228:80}) as expressed. See also
+ footnote~\ref{alggeo:224int:fn1}.
+}
+\bq{alggeo:228:80}
+ S_n \equiv \sum_{k=0}^\infty k^n z^{k} = z\frac{dS_{n-1}}{dz},
+ \ \ n \in \mathbb Z,\ n > 0;
+\eq
+except that you must first read chapter~\ref{drvtv} or otherwise know about
+derivatives to understand this.%
+\footnote{\label{alggeo:228:fn2}%
+ This of course is a forward reference.
+ Logically,~(\ref{alggeo:228:80}) belongs in or after chapter~\ref{drvtv},
+ but none of the earlier chapters use it, so it is kept here with the
+ rest of the geometricseries math. See chapter~\ref{drvtv}'s
+ footnote~\ref{drvtv:250:fn1}.%
+}
+See also \S~\ref{taylor:314}.
% 
@@ 1362,79 +1501,101 @@ vari \linebreak ables and dependent var
\index{sound}
Mathematical models use \emph{indeterminate constants,}
\emph{independent variables} and \emph{dependent variables.} The
three are best illustrated by example. Consider the time~$t$ a sound
needs to travel from its source to a distant listener:
+\emph{independent variables} and \emph{dependent variables.} The three
+are best illustrated by example as follows. Consider the time~$t$ a
+sound needs to travel from its source to a distant listener:
\[
t=\frac{\Delta r}{v_\mr{sound}},
\]
where~$\Delta r$ is the distance from source to listener and
$v_\mr{sound}$ is the speed of sound. Here, $v_\mr{sound}$ is an
indeterminate constant (given particular atmospheric conditions, it
doesn't vary),~$\Delta r$ is an independent variable, and~$t$ is a
dependent variable. The model gives~$t$ as a function of~$\Delta r$;
so, if you tell the model how far the listener sits from the sound source,
the model returns the time needed for the sound to propagate from one to
the other. Note that the abstract validity of the model does not
necessarily depend on whether we actually know the right figure for
$v_\mr{sound}$ (if I tell you that sound goes at $500\:\mr{m/s}$, but
later you find out that the real figure is $331\:\mr{m/s}$, it probably
doesn't ruin the theoretical part of your analysis; you just have to
recalculate numerically). Knowing the figure is not the point. The
point is that conceptually there pre\"exists some right figure for the
indeterminate constant; that sound goes at some constant
speedwhatever it isand that we can calculate the delay in terms of
this.
+where~$\Delta r$ is the distance from source to listener
+and~$v_\mr{sound}$ is the speed of sound. Here,~$v_\mr{sound}$ is an
+indeterminate constant (given particular atmospheric conditions, it does
+not vary),~$\Delta r$ is an independent variable, and~$t$ is a dependent
+variable. The model gives~$t$ as a function of~$\Delta r$; so, if you
+tell the model how far the listener sits from the sound source, then the
+model returns the time the sound needs to propagate from one to the
+other. Regarding the third quantity, the indeterminate
+constant~$v_\mr{sound}$, one conceives of this as having a definite,
+fixed value; yet, oddly, notwithstanding that the value is (or is
+thought of as) fixed, the model's abstract validity may not depend on
+whether one actually knows what the value is (if I tell you that sound
+goes at~$\mbox{350\:\mr{m/s}}$, but later you find out that the real
+figure is~$\mbox{331\:\mr{m/s}}$, this probably does not ruin the
+theoretical part of your analysis; you may only have to recalculate
+numerically). Knowing the value is not the point. The point is that
+conceptually there pre\"exists some correct figure for the indeterminate
+constant; that sound goes at some constant speedwhatever it isand
+that one can calculate the delay in terms of this.%
+\footnote{%
+ Besides the quantities themselves, there is also the manner in which,
+ or pattern by which, the quantities relate to one another. The
+ philosophy that attends this distinction lies mostly beyond the book's
+ scope, but it still seems worth a footnote to quote Bertrand Russell
+ (1872--1970) on the subject:
+ \begin{quote}
+ Given any propositional concept, or any unity \ldots, which may in
+ the limit be simple, its constituents are in general of two sorts:
+ (1)~those which may be replaced by anything else whatever without
+ destroying the unity of the whole; (2)~those which have not this
+ property. Thus in ``the death of Caesar,'' anything else may be
+ substituted for Caesar, but a proper name must not be substituted
+ for \emph{death,} and hardly anything can be substituted for
+ \emph{of.} Of the unity in question, the former class of
+ constituents will be called \emph{terms,} the latter
+ \emph{concepts\mdots} [Emphases in the original.]\cite[appendix~A,
+ \S~482]{Russell:1903}
+ \end{quote}
+ Sections~\ref{alggeo:250} and~\ref{alggeo:225}, and a few later ones,
+ glance upon the matter.
+}
\index{concert hall}
Although there exists a definite philosophical distinction between the
three kinds of quantity, nevertheless it cannot be denied that which
particular quantity is an indeterminate constant, an independent
variable or a dependent variable often depends upon one's immediate
point of view. The same model in the example would remain valid if
atmospheric conditions were changing ($v_\mr{sound}$ would then be an
independent variable) or if the model were used in designing a musical
concert hall%
\footnote{
 Math books are funny about examples like this. Such examples remind
 one of the kind of calculation one encounters in a childhood
 arithmetic textbook, as of the quantity of air contained in an
 astronaut's round helmet. One could calculate the quantity of water
 in a kitchen mixing bowl just as well, but astronauts' helmets are so
 much more interesting than bowls, you see.
 %(Some editor believes that if a
 %kid feels that he is doing astronautical calculations, then he may
 %grow up to be a famous scientist some day. Well, maybe. It's worth a
 %moderate try, anyway.)
+Though the three kinds of quantity remain in some sense distinct, still,
+which particular quantity one regards as an indeterminate constant, as
+an independent variable, or as a dependent variable may depend less upon
+any property of the quantity itselfor of the thing the quantity
+quantifiesthan upon the mathematician's point of view. Moreover, the
+mathematician's point of view can waver. The same model in the example
+would remain valid if atmospheric conditions were
+changing~($v_\mr{sound}$ would then be an independent variable) or if
+the model were used in designing a musical concert hall%
+\footnote{%
+ As a child, were you ever let to read one of those trendy, secondrate
+ arithmetic textbooks that had you calculate such as the quantity of
+ air in an astronaut's round helmet? One could have calculated the
+ quantity of water in a kitchen's mixing bowl just as well, but
+ astronauts' helmets are so much more interesting than bowls, you see.
+ (Whether you will have endured the condescending frivolity
+ specifically of the ersatz astronaut's textbook depends largely on
+ when and where you were raised. Trends will come and go, but maybe
+ you will have met another year's version of the same kind of thing.)
+ So, what of the concert hall?
The chance that the typical reader will ever specify the dimensions of
a real musical concert hall is of course vanishingly small. However,
it is the idea of the example that matters here, because the chance
that the typical reader will ever specify \emph{something}
technical is quite large. Although sophisticated models with many
 factors and terms do indeed play a major role in engineering, the
+ factors and terms do indeed play a large role in engineering, the
great majority of practical engineering calculationsfor quick,
daytoday decisions where small sums of money and negligible risk to
 life are at stakeare done with models hardly more sophisticated
 than the one shown here. So, maybe the concerthall example is not so
 unreasonable, after all.
}
to suffer a maximum acceptable sound time lag from the
stage to the hall's back row ($t$ would then be an independent variable;~$\Delta
r$, dependent). Occasionally we go so far as deliberately to
change our point of view in midanalysis, now regarding as an
independent variable what a moment ago we had regarded as an
indeterminate constant, for instance (a typical case of this arises in
the solution of differential equations by the method of unknown
coefficients, \S~\ref{inttx:240}). Such a shift of viewpoint is fine,
so long as we remember that there is a difference between the three
kinds of quantity and we keep track of which quantity is which kind to
us at the moment.

The main reason it matters which symbol represents which of the three
kinds of quantity is that in calculus, one analyzes how change in
independent variables affects dependent variables as indeterminate
constants remain fixed.
+ life are at stake, or for pro\"emial or exploratory analysisare
+ done with models hardly more sophisticated than the one shown here.
+ So, maybe the concerthall example is not so unreasonable, after all.
+}
+to suffer a maximum acceptable sound time lag from the stage to the
+hall's back row ($t$ would then be an independent variable;~$\Delta r$,
+dependent). Occasionally one goes so far as deliberately to shift one's
+point of view in midanalysisnow regarding as an independent
+variable, for instance, that which one a moment ago had regarded as an
+indeterminate constant (a typical case of such a shift arising in the
+solution of differential equations by the method of unknown
+coefficients, \S~\ref{inttx:240}).
+
+It matters which symbol represents which of the three kinds of quantity
+in part because, in calculus, one analyzes how change in independent
+variables affects dependent variables as indeterminate constants remain
+fixed.
(Section~\ref{alggeo:227} has introduced the dummy variable, which the
present section's threefold taxonomy seems to exclude. However, in fact,
@@ 1470,24 +1631,24 @@ base.
\index{logarithm}
The exponential operation follows the same laws the power operation
follows, but because the variable of interest is now the exponent
+follows; but, because the variable of interest is now the exponent
rather than the base, the inverse operation is not the root but rather
the \emph{logarithm:}
\bq{alggeo:230:logdef}
\log_a (a^z) = z.
\eq
The logarithm $\log_a w$ answers the question, ``What power must I raise~$a$
to, to get~$w$?''
+The logarithm $\log_a w$ answers the question, ``To what power must I
+raise~$a$ to get~$w$?''
Raising~$a$ to the power of the last equation, we have that
\[
a^{\log_a (a^z)} = a^z.
\]
With the change of variable $w \la a^z$, this is
+With the change of variable $w \la a^z$, this is that
\bq{alggeo:230:logdef2}
a^{\log_a w} = w.
\eq
Hence, the exponential and logarithmic operations mutually invert one
+Thus, the exponential and logarithmic operations mutually invert one
another.
\subsection{Properties of the logarithm}
@@ 1555,32 +1716,39 @@ logarithm.
% 
\section{Triangles and other polygons: simple facts}
+\section{The triangle}
\label{alggeo:323}
\index{geometry}
\index{triangle}
\index{polygon}
This section gives simple facts about triangles and other polygons.
+This section develops several facts about the triangle.\footnote{%
+ Fashion seems to ask a writer to burden the plain word ``triangle''
+ with various accurate but notveryhelpful adjectives like ``planar''
+ and ``Euclidean.'' We like planes and Euclid (\S~\ref{alggeo:2235})
+ but would resist the fashion. Readers already know what a triangle
+ is.%
+}
\subsection{The area of a triangle}
+\subsection{Area}
\label{alggeo:323.10}
+\index{area}
\index{triangle!area of}
\index{right triangle}
\index{triangle!right}
\index{rectangle!splitting of down the diagonal}
The area of a \emph{right} triangle%
\footnote{
 A \emph{right triangle} is a triangle, one of whose three angles is
 perfectly square.
+The area of a right triangle%
+\footnote{%
+ As the reader likely knows, a \emph{right triangle} is a triangle, one
+ of whose three angles is perfectly square.
}
is half the area of the corresponding rectangle. This is seen by
splitting a rectangle down its diagonal into a pair of right triangles
of equal size. The fact that \emph{any} triangle's area is half its
base length times its height is seen by dropping a perpendicular from
one point of the triangle to the opposite side (see
Fig.~\ref{intro:284:fig} on page~\pageref{intro:284:fig}), dividing the
+Fig.~\ref{intro:284:fig} on page~\pageref{intro:284:fig}),
+thereby dividing the
triangle into two right triangles, for each of which the fact is true.
In algebraic symbols,
\bq{alggeo:323:10}
@@ 1602,15 +1770,15 @@ which itself is longer than the differen
\eq
where~$a$, $b$ and~$c$ are the lengths of a triangle's three sides.
These are the \emph{triangle inequalities.} The truth of the sum
inequality $c < a+b$, is seen by sketching some triangle on a sheet of
+inequality, that $c < a+b$, is seen by sketching some triangle on a sheet of
paper and asking: if~$c$ is the direct route between two points and
$a+b$ is an indirect route, then how can $a+b$ not be longer? Of course
the sum inequality is equally good on any of the triangle's three sides,
so one can write $az$. So, what phase differential exactly causes
+$z+\Delta z=z$? Where indeed is the boundary between the inward and
+outward domains? Answer: $2\pi/4$.
+Such are the paradoxes of calculus!) With such results in hand, now let
+us recall from earlier in the section that---as we have asserted or
+defined---%
\[
\exp i\theta = \lim_{\ep\rightarrow 0} ( 1 + i\ep )^{\theta/\ep},
\]
and that this remains so for arbitrary real~$\theta$. Yet what does
such an equation do, mechanically, but to compute $\exp i\theta$ by multiplying~$1$ by
$1 + i\ep$ repeatedly, $\theta/\ep$ times? The plain answer is that
such an equation does precisely this and nothing else. We have
+such an equation does precisely this and nothing else.\footnote{See
+footnote~\ref{cexp:230fn1}.} We have
recently seen how each multiplication of the kind the equation suggests
increments the phase~$\phi$ by $\Delta\phi = \ep$ while not changing the
magnitude~$\rho$. Since the phase~$\phi$ begins from $\arg 1 = 0$
@@ 741,23 +771,19 @@ where
Equation~(\ref{cexp:230:33}) serves to raise any complex number to a
complex power.
%Reasoning like this section's, of a motivational rather than a deductive
%character, seems to make some professional mathematicians feel slightly
%uneasy. Professional mathematicians seem to tend to prefer
%to take~(\ref{cexp:euler}), its Taylor series (Ch.~\ref{taylor}), or
%some other, nearly related form as the \emph{definition} of the complex
%exponential. The applied mathematician however tends to prefer to
%emphasize motivation over definition, which is why this book has
%motivated~(\ref{cexp:euler}) rather than just to define it.

\index{Euler's formula!curious consequences of}
\index{natural logarithm!of a complex number}
Curious consequences of Euler's formula~(\ref{cexp:euler}) include that
+Curious consequences of Euler's formula~(\ref{cexp:euler}) include
+that\footnote{%
+ Notes of the obvious, like $n\in\mathbb Z$, are sometimes omitted by
+ this book because they clutter the page. However,
+ the note is included in this instance.%
+}
\bq{cexp:230:34}
\begin{split}
 e^{\pm i2\pi/4} &= \pm i, \\
 e^{\pm i2\pi/2} &= -1, \\
 e^{i n2\pi} &= 1.
+ e^{\pm i2\pi/4} &= \pm i; \\
+ e^{\pm i2\pi/2} &= -1; \\
+ e^{i n2\pi} &= 1,\:\ n\in\mathbb Z.
\end{split}
\eq
For the natural logarithm of a complex number in light of Euler's
@@ 779,7 +805,7 @@ formula, we have that
\index{Moivre, Abraham de (16671754)}
\index{de Moivre's theorem}
Euler's formula~(\ref{cexp:euler}) implies that complex numbers~$z_1$
and~$z_2$ can be written
+and~$z_2$ can be written as
\bq{cexp:240:05}
\begin{split}
z_1 &= \rho_1 e^{i\phi_1}, \\
@@ 813,13 +839,13 @@ we have that
\exp( +i\phi ) &=& \cos \phi + i\sin\phi, \\
\exp( -i\phi ) &=& \cos \phi - i\sin\phi.
\eqb
Adding the two equations and solving for $\cos\phi$ yields
+Adding the two equations and solving for $\cos\phi$ yields that
\bq{cexp:250:cos}
\index{cosine!in complex exponential form}
\cos\phi = \frac{\exp(+i\phi)+\exp(-i\phi)}{2}.
\eq
Subtracting the second equation from the first and solving for
$\sin\phi$ yields
+$\sin\phi$ yields that
\bq{cexp:250:sin}
\index{sine!in complex exponential form}
\sin\phi = \frac{\exp(+i\phi)-\exp(-i\phi)}{i2}.
@@ 832,6 +858,9 @@ Thus are the trigonometrics expressed in
\index{hyperbolic trigonometrics}
\index{trigonometrics!hyperbolic}
+\index{hyperbolic sine}
+\index{hyperbolic cosine}
+\index{hyperbolic tangent}
\index{Pythagorean theorem!and the hyperbolic functions}
The forms~(\ref{cexp:250:cos}) and~(\ref{cexp:250:sin}) suggest the
definition of new functions
@@ 842,7 +871,8 @@ definition of new functions
\eqa
These are called the \emph{hyperbolic functions.} Their inverses $\mopx{arccosh}$,
etc., are defined in the obvious way. The Pythagorean theorem for
trigonometrics~(\ref{trig:226:25}) is that $\cos^2\phi + \sin^2\phi=1$;
+trigonometrics~(\ref{trig:226:25}) is that $\cos^2\phi + \sin^2\phi=1$,
+verified by combining~(\ref{cexp:250:cos}) and~(\ref{cexp:250:sin});
and from~(\ref{cexp:250:cosh}) and~(\ref{cexp:250:sinh}) one can derive
the hyperbolic analog:
\bq{cexp:250:pythag}
@@ 851,32 +881,39 @@ the hyperbolic analog:
\cosh^2\phi - \sinh^2\phi &= 1.
\end{split}
\eq
Both lines of~(\ref{cexp:250:pythag}) hold for complex~$\phi$ as well as
for real.%
\footnote{
 Chapter~\ref{vector} teaches that the ``dot product'' of a unit vector
+
+Although Fig.~\ref{cexp:230:fig} has visualized only real~$\phi$,
+complex~$\phi$ can be considered, too. Nothing prevents
+one from taking~(\ref{cexp:250:cos}) through~(\ref{cexp:250:sinh}), as
+written, \emph{to define} the trigonometrics of complex~$\phi$;
+so that's what we now do. From this it follows
+that~(\ref{cexp:250:pythag}) and others must likewise hold\footnote{
+ Chapter~\ref{vector} teaches that the \emph{dot product} of a unit vector
and its own conjugate is unity$\vu v^{*} \cdot \vu v = 1$, in the
 notation of that chapterwhich tempts one incorrectly to suppose by
+ notation of that chapter---which tempts one to suppose incorrectly by
analogy that $(\cos\phi)^{*}\cos\phi + (\sin\phi)^{*}\sin\phi = 1$ and
that $(\cosh\phi)^{*}\cosh\phi - (\sinh\phi)^{*}\sinh\phi = 1$ when the
angle~$\phi$ is complex. However,~(\ref{cexp:250:cos})
 through~(\ref{cexp:250:sinh}) can generally be true only
+ through~(\ref{cexp:250:sinh}) can be generally true only
if~(\ref{cexp:250:pythag}) holds exactly as written for complex~$\phi$
as well as for real. Hence in fact $(\cos\phi)^{*}\cos\phi +
 (\sin\phi)^{*}\sin\phi \neq 1$ and $(\cosh\phi)\cosh\phi -
+ (\sin\phi)^{*}\sin\phi \neq 1$ and $(\cosh\phi)^{*}\cosh\phi -
(\sinh\phi)^{*}\sinh\phi \neq 1$.
 Such confusion probably tempts few readers unfamiliar with the
 material of Ch.~\ref{vector}, so you can ignore this footnote for now.
 However, if later you return after reading Ch.~\ref{vector} and if the
 confusion then arises, then consider that the angle~$\phi$ of
 Fig.~\ref{trig:226:f1} is a real angle, whereas we originally derived
 (\ref{cexp:250:pythag})'s first line from that figure. The figure is
 quite handy for real~$\phi$, but what if anything the figure means
 when~$\phi$ is complex is not obvious. If the confusion descends
 directly or indirectly from the figure, then such thoughts may serve
 to clarify the matter.
+ Fig.~\ref{trig:226:f1} is quite handy for real~$\phi$ but what if
+ anything the figure means when~$\phi$ is complex is not obvious.
+ The~$\phi$ of the figure cannot quite be understood to mean an actual
+ direction or bearing in the east-north-west-south sense. Therefore,
+ visual analogies between geometrical vectors like~$\vu v$, on the one
+ hand, and Argandplotted complex numbers, on the other, can
+ analytically fail, especially in circumstances in which~$\phi$ may be
+ complex.
+ (The professional mathematician might smile at this, gently prodding
+ us that this is why one should rely on analysis rather than on mere
+ geometrical intuition. If so, then we would acknowledge the
+ prod~\cite{Carson/Huber} without further comment in this instance.)%
}
+for complex~$\phi$.
\index{cis}
The notation $\exp i(\cdot)$ or $e^{i(\cdot)}$ is sometimes felt to be too
@@ 910,10 +947,10 @@ hyperbolic use.
\index{natural logarithmic family of functions}
\index{trigonometric family of functions}
\index{inverse trigonometric family of functions}
At this point in the development one begins to notice that the $\sin$,
$\cos$, $\exp$, $\cis$, $\cosh$ and $\sinh$ functions are each really
+At this point in the development one begins to notice that the $\cos$,
+$\sin$, $\exp$, $\cis$, $\cosh$ and $\sinh$ functions are each really
just different facets of the same mathematical phenomenon. Likewise
their respective inverses: $\arcsin$, $\arccos$, $\ln$, $i\ln$, $\mopx{arccosh}$
+their respective inverses: $\arccos$, $\arcsin$, $\ln$, $i\ln$, $\mopx{arccosh}$
and $\mopx{arcsinh}$. Conventional names for these two mutually inverse
families of functions are unknown to the author, but one might
call them the \emph{natural exponential} and \emph{natural logarithmic
@@ 921,7 +958,6 @@ families.} Or, if the various tangent f
might call them the \emph{trigonometric} and \emph{inverse trigonometric
families.}
% diagn: this subsection wants a bit more review.
\subsection{Inverse complex trigonometrics}
\label{cexp:250.30}
\index{trigonometrics!inverse complex}
@@ 931,8 +967,14 @@ Since one can express the several trigon
complex exponentials one would like to know, complementarily, whether
one cannot express the several inverse trigonometric functions in terms
of complex logarithms. As it happens, one can.%
\footnote{\cite[Ch.~2]{Spiegel}}
+\footnote{\cite[chapter~2]{Spiegel}}
+\index{arcsine}
+\index{arccosine}
+\index{arctangent}
+\index{hyperbolic arcsine}
+\index{hyperbolic arccosine}
+\index{hyperbolic arctangent}
Let us consider the arccosine function, for instance. If
per~(\ref{cexp:250:cos})
\[
@@ 941,7 +983,9 @@ per~(\ref{cexp:250:cos})
then by successive steps
\bqb
e^{iw} &=& 2z - e^{-iw}, \\
 \left(e^{iw}\right)^2 &=& 2z\left(e^{iw}\right) - 1, \\
+ \left[e^{iw}\right]^2 &=&
+ \left[e^{iw}\right]\left[2z - e^{-iw}\right] =
+ 2z\left(e^{iw}\right) - 1, \\
e^{iw} &=& z \pm \sqrt{ z^2  1 },
\eqb
the last step of which has used the quadratic
@@ 1069,15 +1113,18 @@ inverse trigonometric functions.
\index{cosine!derivative of}
\index{derivative!of sine and cosine}
One can compute derivatives of the sine and cosine functions
from~(\ref{cexp:250:cos}) and~(\ref{cexp:250:sin}), but to do it in that
way doesn't seem sporting. Better applied style is to find the
derivatives by observing directly the circle from which the sine and
cosine functions come.
+One could compute derivatives of the sine and cosine functions
+from~(\ref{cexp:250:cos}) and~(\ref{cexp:250:sin}). To do so is left as
+an exercise. Meanwhile, however, another, more sporting way to find
+the derivatives is known: one can directly examine the circle from which
+the sine and cosine functions come.
Refer to Fig.~\ref{cexp:320:fig}. Suppose that
the point~$z$ in the figure is not fixed but travels steadily
about the circle such that
+about the circle such that\footnote{%
+ Observe the Greek letter~$\omega$, omega, which is not a Roman~$w$.
+ Refer to appendix~\ref{greek}\@.%
+}
\bq{cexp:320:30}
z(t) = (\rho)\left[\cos(\omega t+\phi_o) + i\sin(\omega t+\phi_o)\right].
\eq
@@ 1168,7 +1215,7 @@ of~(\ref{cexp:320:32}), we have that
\frac{d}{dt}\sin (\omega t + \phi_o) &= +\omega\cos (\omega t + \phi_o).
\end{split}
\eq
If $\omega = 1$ and $\phi_o = 0$, these are
+If $\omega = 1$ and $\phi_o = 0$, these are that
\bq{cexp:320:12}
\begin{split}
\frac{d}{dt}\cos t &= -\sin t, \\
@@ 1309,6 +1356,9 @@ way. Table~\ref{cexp:drvi} summarizes.
}
\end{table}
+Table~\ref{cexp:drvi} may prove useful when the integration technique of
+\S~\ref{inttx:210} is applied.
+
% 
\section{The actuality of complex quantities}
@@ 1316,10 +1366,13 @@ way. Table~\ref{cexp:drvi} summarizes.
\index{complex number!actuality of}
\index{number!complex, actuality of}
Doing all this neat complex math, the applied mathematician can lose
sight of some questions he probably ought to keep in mind: Is there
really such a thing as a complex quantity in nature? If not, then
hadn't we better avoid these complex quantities, leaving them to the
+\index{nature!complex number in}
+\index{complex number in nature}
+Doing all this theoretically interesting complex mathematics, the
+applied mathematician can lose sight of some questions he probably ought
+to keep in mind: Do complex quantities arise in nature? If they do
+not, then what physical systems do we mean to model with them? Hadn't
+we better avoid these complex quantities, leaving them to the
professional mathematical theorists?
\index{Heaviside, Oliver (18501925)}
@@ 1337,7 +1390,7 @@ Answer: again,~$400$~g.
\index{conjugate}
\index{superposition}
\index{mirror}
\index{handwriting, reflected}
+%\index{handwriting, reflected}
\index{grapes}
Probably you would not choose to think of~$200+i100$~g of grapes
and~$200i100$~g of grapes, but because of~(\ref{cexp:250:cos})
@@ 1347,53 +1400,91 @@ for instance the propagating wave
\[
A\cos[\omega t-kz] = \frac{A}{2}\exp[+i(\omega t-kz)] + \frac{A}{2}\exp[-i(\omega t-kz)].
\]
The benefit of splitting the real cosine into two complex parts is that
+The benefit of splitting the real cosine into two complex parts is that,
while the magnitude of the cosine changes with time~$t$, the magnitude
of either exponential alone remains steady (see the circle in
Fig.~\ref{cexp:230:fig}). It turns out to be much easier to analyze two
complex wave quantities of constant magnitude than to analyze one real
wave quantity of varying magnitude. Better yet, since each complex wave
quantity is the complex conjugate of the other, the analyses thereof are
mutually conjugate, too; so you normally needn't actually analyze the
second. The one analysis suffices for both.%
+mutually conjugate, too (\S~\ref{alggeo:225.2}); so you normally needn't
+actually analyze the second. The one analysis suffices for both.%
\footnote{
If the point is not immediately clear, an example: Suppose that by
the NewtonRaphson iteration (\S~\ref{drvtv:270}) you have found a
root of the polynomial $x^3 + 2x^2 + 3x + 4$ at
$x \approx -\mbox{0x0.2D}+i\mbox{0x1.8C}$. Where is there another root?
 Answer: at the complex conjugate, $x \approx
 \mbox{0x0.2D}i\mbox{0x1.8C}$. One need not actually run the
 NewtonRaphson again to find the conjugate root.
+ Answer: there is a conjugate root at $x \approx
+ -\mbox{0x0.2D}-i\mbox{0x1.8C}$. Because the polynomial's coefficients
+ are real, one need not actually run the NewtonRaphson again to find
+ the conjugate root.
+
+ \index{voltage}
+ \index{tension, electric}
+ \index{potential!electric}
+ \index{electric tension or potential}
+ \index{capacitor}
+ \index{electric capacitor}
+ \index{electric current}
+ \index{current, electric}
+ Another example, this time with a wave: suppose that, when fed by a
+ time-varying electric current of
+ $(5.0\:\mbox{milliamps})\exp\{+i(60\:\mbox{sec}^{-1})2\pi t\}$, an electric
+ capacitor develops a voltage---that is, develops an electric tension or
+ potential---of
+ $(40\:\mbox{volts})\exp\{+i[(60\:\mbox{sec}^{-1})2\pi t - 2\pi/4]\}$.
+ It immediately follows, without further analysis, that the same
+ capacitor, if fed by a time-varying electric current of
+ $(5.0\:\mbox{milliamps})\exp\{-i(60\:\mbox{sec}^{-1})2\pi t\}$, would
+ develop a voltage of
+ $(40\:\mbox{volts})\exp\{-i[(60\:\mbox{sec}^{-1})2\pi t - 2\pi/4]\}$.
+ The conjugate current gives rise to a conjugate voltage.
+
+ The reason to analyze an electric circuit in such a way is that, after
+ analyzing it, one can sum the two complex currents to get a real a.c.\
+ current like the current an electric wall receptacle supplies. If one
+ does this, then one can likewise sum the two complex voltages to
+ compute the voltage the capacitor would develop. Indeed, this is how
+ electrical engineers normally analyze a.c.\ systems (well,
+ electrical engineers know some shortcuts, but this is the idea),
+ because~$\exp(\cdot)$ is so much easier a function to handle
+ than~$\cos(\cdot)$ or~$\sin(\cdot)$ is.
}
% The following analogy admittedly remains a little silly, but it stays
% here unless and until the author thinks of something better.
(It's like reflecting your sister's handwriting. To read her
handwriting backward, you needn't ask her to try writing reverse with
the wrong hand; you can just hold her regular script up to a mirror.
Of course, this ignores the question of why one would want to reflect
someone's handwriting in the first place; but anyway,
reflecting---which is to say, conjugating---complex quantities often is
useful.)
+%(It's like reflecting your sister's handwriting. To read her
+%handwriting backward, you needn't ask her to try writing reverse with
+%the wrong hand; you can just hold her regular script up to a mirror.
+%Of course, this ignores the question of why one would want to reflect
+%someone's handwriting in the first place; but anyway,
+%reflecting---which is to say, conjugating---complex quantities often is
+%useful.)
\index{Ockham's razor!abusing}
\index{Ockham!William of (c.~12871347)}
\index{Aristotle (384322~B.C.)}
Some authors have gently denigrated the use of imaginary parts in
physical applications as a mere mathematical trick, as though the parts
were not actually there. Well, that is one way to treat the matter, but
+were not actually there.\footnote{%
+ One who gently denigrates the use can nevertheless still apply the
+ trick! They often do.%
+}
+Well, that is one way to treat the matter, but
it is not the way this book recommends. Nothing in the mathematics
\emph{requires} you to regard the imaginary parts as physically
nonexistent. You need not abuse Ockham's razor!
% diagn: the following parenthetical note wants review.
(Ockham's razor, ``Do not multiply objects without necessity,''%
\footnote{
 \cite[Ch.~12]{Stroustrup}
+ \cite[chapter~12]{Stroustrup}
}
is not a bad philosophical indicator as far as it goes, but is overused in
some circles---particularly in circles in which Aristotle%
+is a sound philosophical indicator when properly used. However,
+the razor is overused in some circles, particularly in circles in which
+Aristotle%
\footnote{\cite{Feser}}
is mistakenly believed to be vaguely outdated. More often than one
likes to believe, the necessity to multiply objects remains
+is believed---mistakenly, in this writer's view---to be vaguely
+outdated; or more likely in circles in which Aristotle has been
+altogether forgotten. More often than one likes to believe, the
+necessity to multiply objects remains
hidden until one has ventured the multiplication, nor reveals
itself to the one who wields the razor, whose hand humility should
stay.) It is true by Euler's formula~(\ref{cexp:euler}) that a complex
@@ 1402,18 +1493,18 @@ However, it is equally true by the compl
formulas~(\ref{cexp:250:cos}) and~(\ref{cexp:250:sin}) that \emph{a
trigonometric can be decomposed into a sum of complex exponentials.}
So, if each can be decomposed into the other, then which of the two is
the real decomposition? Answer: it depends on your point of view.
+the true decomposition? Answer: that depends on your point of view.
Experience seems to recommend viewing the complex exponential as the
basic elementas the element of which the trigonometrics are
composedrather than the other way around. From this point of view,
it is~(\ref{cexp:250:cos}) and~(\ref{cexp:250:sin}) which are the real
decomposition. Euler's formula itself is secondary.
+it is~(\ref{cexp:250:cos}) and~(\ref{cexp:250:sin}) which are the true
+decomposition. Euler's formula~(\ref{cexp:euler}) itself could be viewed in
+this sense as secondary.
% diagn: review this revised paragraph one more time.
The complex exponential method of offsetting imaginary parts offers an
elegant yet practical mathematical means to model physical wave
phenomena. So go ahead: regard the imaginary parts as actual.
Aristotle would regard them so (or so the author suspects).
+phenomena. It may find other uses, too, so go ahead: regard the imaginary
+parts as actual. Aristotle would regard them so (or so the author suspects).
%\footnote{
% A famous Englishlanguage physics book of the twentieth century, which
% this particular footnote will not name but which was and remains
diff pruN 0.53.201204142/tex/conclu.tex 0.56.20180123.12/tex/conclu.tex
 0.53.201204142/tex/conclu.tex 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/tex/conclu.tex 20180123 22:25:03.000000000 +0000
@@ 0,0 +1,450 @@
+% 
+
+\addtocounter{chapter}{5}
+\chapter{Remarks}
+\label{conclu}
+\index{remarks, concluding}
+\index{concluding remarks}
+
+A book could tell more about derivations of applied mathematics, maybe
+without limit. This book ends here.
+
+If you have learned by reading, I too have learned by writing. An
+engineer, I have long observed advantages of the applied approach to
+mathematics. Writing however has let drawbacks of the applied approach
+impress themselves as well. Some drawbacks are nonobvious.
+
+\section{Frege}
+
+\index{Frege, Friedrich Ludwig Gottlob (1848--1925)}
+\index{G\"odel, Kurt Friedrich (1906--1978)}
+\index{Hilbert, David (1862--1943)}
+\index{Kant, Immanuel (1724--1804)}
+\index{Hume, David (1711--1776)}
+\index{language, natural}
+\index{natural language}
+\index{arithmetic}
+In~1879, the mathematician and philosopher Gottlob Frege explained,
+\begin{quote}
+ In apprehending a scientific truth we pass, as a rule, through various
+ degrees of certitude. Perhaps first conjectured on the basis of an
+ insufficient number of particular cases, a general proposition comes
+ to be more and more securely established by being connected with other
+ truths through chains of inferences, whether consequences are derived
+ from it that are confirmed in some other way or whether, conversely,
+ it is seen to be a consequence of propositions already established.
+ Hence we can inquire, on the one hand, how we have gradually arrived
+ at a given proposition and, on the other, how we can finally provide
+ it with the most secure foundation. The first question may have to be
+ answered differently for different persons; the second is more
+ definite, and the answer to it is connected with the inner nature of
+ the proposition considered. The most reliable way of carrying out a
+ proof, obviously, is to follow pure logic, a way that, disregarding
+ the particular characteristics of objects, depends solely on those
+ laws upon which all knowledge rests\mdots \emph{To prevent anything
+ intuitive from penetrating here unnoticed,}\footnote{Emphasis added.}
+ I had to bend every effort to keep the chain of inferences free of gaps.
+ In attempting to comply with this requirement in the strictest
+ possible way I found the inadequacy of language to be an obstacle; no
+ matter how unwieldy the expressions I was ready to accept, I was less
+ and less able, as the relations became more and more complex, to
+ attain the precision that my purpose required. This deficiency led me
+ to the idea of [an] ideography [whose] first purpose \ldots\ is to
+ provide us with the most reliable test of the validity of a chain of
+ inferences and to point out every presupposition that tries to sneak
+ in unnoticed, so that its origin can be investigated.~\cite[Preface]{Frege:1879}
+\end{quote}
+Frege's sentiments are poignant in that, like G\"odel and
+unlike Hilbert, Frege can fairly be described as a Platonist. (Witness
+Frege's words in a later book: ``In arithmetic we are not concerned with
+objects which we come to know as something alien from without through
+the medium of the senses, but with objects given directly to our reason
+and, as its nearest kin, utterly transparent to it. And yet, or rather
+for that very reason, these objects are not subjective fantasies. There
+is nothing more objective than the laws of
+arithmetic.''\footnote{\cite{Lotter}}) To Frege, unlike to
+Hume or even to Kant, Platonic realism affords all the more cause to
+distrust merely human processes of inference in mathematical matters.
+
+The present book being long enough as it is, one can hardly see how to
+have met a truly Fregean standard while covering sufficient applied
+mathematical ground within the covers of a single volume; but yet does
+Frege not still have a point? I believe that he does. In some Fregean sense,
+the book you are reading has been a book of \emph{derivation sketches}
+of various merit rather than of derivations proper. I am reasonably
+satisfied that the book does what it had set out to do, but also
+observe that a close study of the book points out the supplementary
+need too for more formulaic approaches.
+
+\section{Temperament}
+
+\index{teacher}
+\index{instructor}
+\index{course}
+\index{undergraduate}
+\index{circuit theory}
+\index{continuous and discrete systems}
+\index{discrete and continuous systems}
+\index{system!continuous or discrete}
+\index{electronics}
+\index{industrial electronics}
+\index{programming}
+\index{C++}
+\index{electromagnetics}
+\index{freshman}
+\index{sophomore}
+\index{junior}
+\index{United States}
+\index{Fourier, Jean Baptiste Joseph\\(17681830)}
+\index{set theory}
+\index{foundations of mathematics}
+\index{screwdriver}
+The matter in question is, naturally, not one you and I are likely to
+settle in a few paragraphs, so for the present purpose let me turn
+attention to a facet of the matter which has proved significant at least
+to me. Though I am a practicing engineer rather than a teacher, it so
+happens that (besides being the father of six, which has made me a teacher
+of another kind) I have over the years, on a part-time basis, taught
+several state-university courses in electrical engineering, having
+instructed in sum about~1000 engineering undergraduates in subjects like
+circuit theory, industrial electronics, continuous and discrete
+systems,~C++ programming, and electromagnetics. My undergraduates have
+been U.S. freshmen, sophomores and juniors mostly aged~18 to~21, so none of
+the teaching has been very advanced; and indeed as measured in U.S.\
+academia such occasional instructional experience as mine, sans
+academic research, counts for so little that I should hardly mention it
+here except for one point: an instructor cannot instruct so many
+engineering undergraduates without coming to understand somewhat of how
+young future engineers think and learn. When an engineering undergraduate is
+finding, say, Fourier's concepts hard to grasp, his engineering
+instructor will not extrude the topic into formulations congenial to set
+theory. Rather, the instructor will sketch some diagrams, assign some
+pencil-and-paper exercises, require the hands-on construction/testing of
+a suitable mechanical/chemical/electrical apparatus, and then field
+questions by engaging the undergraduate's physical intuition as directly
+as the instructor can. Of course, professional mathematicians likewise
+brandish partly analogous intuitional techniques from
+their own instructional arsenals; but the professional carries the
+additional burden of preparing \emph{his} students, by gradual stages,
+to join mathematics' grand investigation into foundations---or at least
+he carries the burden of teaching his students, in the spirit of Frege,
+how to deploy formal methods to preclude error. The engineering student
+lacks the time and, usually, the temperament for that. He has a
+screwdriver in his hand.
+
+\index{bridge}
+\index{logic}
+And this state of affairs is right and proper, is it not? Give the
+professional his due. While we engineers are off designing bridges or
+whatever, the professional mathematician will make it his business to be
+better at logic than we.
+
+\section{Foundations athwart intuition}
+\label{conclu:athwart}
+
+\index{Cantor, Georg (18451918)}
+\index{philosophy!Greek}
+\index{Greek philosophy}
+\index{Reginald of Piperno, Father (c.~1230c.~1290)}
+\index{Aquinas, St.~Thomas (12251274)}
+\index{physicalintuitional methods}
+The experience of writing this book has strengthened my own conviction
+that the search for the ultimate foundations of mathematics is probably
+futile. If we have not unearthed the ultimate foundations by now,~2500
+years on from when Greek philosophy started excavating, then the
+coming~2500 years seem unlikely to reveal them. Some may think, ``We
+have the computer now. We have Cantor's set theory. It is different.''
+But I am not convinced.
+Not a professional mathematician nor a philosopher, I
+neither expect nor ask a reader to lend my conviction in such a matter
+much weight---nor does \emph{this} book seriously attempt to support
+the conviction\footnote{Try~\cite{Feser}
+and~\cite{Feser:StTh}, rather.}but as for myself, I doubt that it is
+in the nature of mortal human
+intellect to discover or grasp ultimate foundations of such a kind.
+Like Father Reginald, I credit St.~Thomas' last report.\footnote{%
+ For reasons that have little to do with mathematics, it has been
+ fashionable to impute such credit to fanaticism. Fashion, however, is
+ as fleeting as it is shallow. You and I must aim deeper than that.
+ Refer to~\cite[``St.~Thomas Aquinas'']{CatholicEncyc}.%
+}
+% I did not wish to strike the following footnote. However, the
+% footnote drags the innocent reader into a dispute for which he did not
+% ask. Unfortunately, I'll just have to take the bigotry. Fortunately,
+% I am not important enough for the bigotry, because of a sentence in
+% the last chapter of a long book, to have much practical effect. At
+% any rate, the footnote is struck.
+%\footnote{%
+% Though I am aware of the careless bigotry (how could one be unaware?)\
+% that affects to sneer at such remarks in the regrettably unstable
+% country and era in which I write, the notice of St.~Thomas is not a
+% social signal. The book is not about signaling. It should go without
+% saying (but somehow it doesn't) that you simply cannot be serious
+% about~2500 years of Western philosophy while summarily writing off the
+% schoolmen.
+%
+% That this footnote should be necessary shames reason and embarrasses
+% posterity. As Gibbon might have added---no friend to the schoolmen
+% he---that this footnote should be necessary condemns the taste of our
+% age.%
+% %What a waste of good ink.%
+%}
+
+Even so, unexpectedly, the experience of writing the book has
+illuminated in my sight a certain mathematical inadequacy of
+physical-intuitional methods. Wherever large enough a mathematical
+structure is built by mostly physical-intuitional methods, a fact of
+experience seems to emerge: the structure begins to creak.
+
+Professional mathematicians would have told us as much.
+
+And yet---the na\"ive, merely plausible extension of mathematical
+methods carries greater impact, and retains more power, than pure
+mathematics may like to admit, as in \S~\ref{prob:200.90} for example.
+Anyway, such na\"ive extension is more
+than \emph{merely} plausible. Such na\"ive extension yields correct
+results in the main, too, as for example in this book's applied-level
+development of complex exponentials and analytic continuation.
+Sometimes, bending the arc of reason to follow the trail of intuition
+is the right thing to do.
+
+Substantial value subsists in the applied approach.
+
+\section{Convergence}
+\label{conclu:convergence}
+
+\index{convergence}
+\index{professor}
+One of the more debatable choices I have made during the writing of this
+book has been to skip explicit justification of the convergence of
+various sums and integrals---or, if you prefer, has been to leave the
+justification in most instances as an exercise. A pure mathematician
+would not have done so, not at any rate in the same way. I still recall
+an undergraduate engineering lecture, though, decades ago, during which
+the lean, silver-haired engineering professor---pausing the second time
+to justify an interchange of summing operators---rather turned to the
+class and confided, ``Instead of justifying the interchange again, let's
+just \emph{do it,} okay?'' That professor had his priorities straight.
+
+%\index{Weierstrass, Karl Wilhelm Theodor (1815--1897)}
+Admittedly, to train the intuition, a mathematical education probably
+ought at some stage to expose the student to formal, Weierstrassian
+tests of convergence. However, except at that stage, the repetitive
+justification of convergence soon grows tiresome. If the reader cannot
+readily tell for himself, \emph{in a given concrete case,} whether a sum
+converges, is this not probably because the reader fails to understand
+the term being summed? If the reader indeed fails to understand, then
+Weierstrass can hardly help.
+
+Though the proposition remains debatable, I believe---at least insofar
+as the book you are reading is an applied work---that the book's
+approach to convergence has been the right one.
+
+%Weierstrassian tests have their place in mathematics.
+%Weierstrassian tests have, however, had less of a place in this book.
+
+\section{Klein}
+\label{conclu:klein}
+
+\index{Klein, C.~Felix (1849--1925)}
+\index{G\"ottingen}
+November 2,~1895, at G\"ottingen, the mathematician Felix Klein (like
+Frege a German) masterfully summarized both sides of these matters. His
+lecture and its English translation having passed into the public
+domain, we are free to quote Klein at length as follows.
+\begin{quotation}
+ \index{Newton, Sir Isaac (1642--1727)}
+ \index{Leibnitz, Gottfried Wilhelm (1646--1716)}
+ \index{Gauss, Carl Friedrich (1777--1855)}
+ \index{Abel, Niels Henrik (1802--1829)}
+ \index{Cauchy, Augustin Louis (1789--1857)}
+ \index{Dirichlet, J.~Peter Gustav Lejeune (1805--1859)}
+ \noindent
+ \ldots\ With the contemplation of nature as its starting point, and its
+ interpretation as object, a philosophical principle, the principle of
+ continuity, was made fundamental; and the use of this principle
+ characterizes the work of the great pioneers, Newton and Leibnitz, and
+ the mathematicians of the whole of the eighteenth century---a century of
+ discoveries in the evolution of mathematics. Gradually, however, a more
+ critical spirit asserted itself and demanded a logical justification for
+ the innovations made with such assurance, the establishment, as it were,
+ of law and order after the long and victorious campaign. This was the
+ time of Gauss and Abel, of Cauchy and Dirichlet. But this was not the
+ end of the matter. Gauss, taking for granted the continuity of space,
+ unhesitatingly used space intuition as a basis for his proofs; but
+ closer investigation showed not only that many special points still
+ needed proof, but also that space intuition had led to the too hasty
+ assumption of the generality of certain theorems which are by no means
+ general. Hence arose the demand for exclusively arithmetical means of
+ proof; nothing shall be accepted as a part of the science unless its
+ rigorous truth can be clearly demonstrated by the ordinary operations of
+ analysis\mdots [W]here formerly a diagram served as proof, we now find
+ continual discussions of quantities which become smaller than, or which
+ can be taken smaller than, any given small quantity. The continuity of
+ a variable, and what it implies, are discussed\mdots
+
+ Of course even this assigns no absolute standard of exactness; we can
+ introduce further refinements if still stricter limitations are placed
+ on the association of the quantities. This is exemplified \ldots\ in
+ the efforts to introduce symbols for the different logical processes, in
+ order to get rid of the association of ideas, and the lack of accuracy
+ which creeps in unnoticed, and therefore not allowed for, when
+ ordinary language is used\mdots
+
+ Summing up all these developments in the phrase, \emph{the arithmetizing
+ of mathematics,} I pass on to consider the influence of the tendency
+ here described on parts of the science outside the range of analysis
+ proper. Thus, as you see, while voluntarily acknowledging the
+ exceptional influence of the tendency, I do not grant that the
+ arithmetized science is the essence of mathematics; and my remarks have
+ therefore the twofold character of positive approbation, and negative
+ disapproval. For since I consider that the essential point is not the
+ mere putting of the argument into the arithmetical form, but the more
+ rigid logic obtained by means of this form, it seems to me
+ desirable---and this is the positive point of my thesis---to subject the
+ remaining divisions of mathematics to a fresh investigation based on the
+ arithmetical foundation of analysis. On the other hand I have to point
+ out most emphatically---and this is the negative part of my task---that
+ it is not possible to treat mathematics exhaustively by the method of
+ logical deduction alone\mdots
+
+ In the short time at my disposal I must content myself with presenting
+ the most important points; I begin therefore by tracing the relation of
+ the positive part of my thesis to the domain of geometry. The
+ arithmetizing of mathematics began originally, as I pointed out, by
+ ousting space intuition; the first problem that confronts us as we turn
+ to geometry is therefore that of reconciling the results obtained by
+ arithmetical methods with our conception of space\mdots The net result
+ is, on the one hand, a refinement of the process of space intuition; and
+ on the other, an advantage due to the clearer view that is hereby
+ obtained of the analytical results considered, with the consequent
+ elimination of the paradoxical character that is otherwise apt to attach
+ itself to them\mdots [T]here still remains the more important question:
+ What justification have we for regarding the totality of points in space
+ as a number-manifoldness in which we interpolate the irrational numbers
+ in the usual manner between the rational numbers arranged in three
+ dimensions? We ultimately perceive that space intuition is an inexact
+ conception, and that in order that we may subject it to mathematical
+ treatment, we idealize it by means of the so-called axioms\mdots
+
+ \index{Jacobi, Carl Gustav Jacob (1804--1851)}
+ Another question is this: Practical physics provides us plentifully with
+ experimental results, which we unconsciously generalize and adopt as
+ theorems about the idealized objects\mdots [T]he theorem that every
+ finite elastic body is capable of an infinite series of harmonic
+ oscillations [belongs to this category]\mdots [Is such a theorem],
+ taken in the abstract, [an] exact mathematical [theorem], or how must
+ [it] be limited and defined in order that [it] may become so?\,\ldots\
+ You see here what is the precise object of \ldots\ renewed
+ investigations; not any new physical insight, but abstract mathematical
+ argument in itself, on account of the clearness and precision which will
+ thereby be added to our view of experimental facts. If I may use an
+ expression of Jacobi's in a somewhat modified sense, it is merely a
+ question of intellectual integrity, ``die Ehre des menschlichen
+ Geistes.''
+
+ After expressing myself thus it is not easy, without running counter to
+ the foregoing conclusions, to secure to intuition her due share in our
+ science;%
+ \footnote{Klein characterizes mathematics as a ``science'' so
+ often that, insofar as my book is quoting Klein with approbation, I
+ should note that I have never been persuaded that \emph{science} is
+ the right word for it. This is a minor quibble, and my witness
+ may not weigh much in comparison with that of the eminent Klein,
+ but I would nevertheless prefer rather to regard mathematics as a
+ branch of \emph{philosophy.}} and yet it is exactly on this antithesis
+ that the point of my present statements depends. I am now thinking
+ not so much of the cultivated intuition just discussed, which has been
+ developed under the influence of logical deduction and might almost be
+ called a form of memory; but rather of the na\"ive intuition, largely
+ a natural gift, which is unconsciously increased by minute study of
+ one branch or other of the science. The word \emph{intuition} is
+ perhaps not well chosen; I mean it to include that instinctive feeling
+ for the proportion of the moving parts with which the engineer
+ {c}{r}{i}{t}{i}{c}{i}{s}{e}{s} the distribution of power in any piece
+ of mechanism he has constructed; and even the indefinite conviction
+ the practiced calculator possesses as to the convergence of any
+ infinite process that lies before him. I maintain that mathematical
+ intuition---so understood---is always far in advance of logical
+ reasoning and covers a wider field.
+
+ I might now introduce an historical excursus, showing that in the
+ development of most of the branches of our science, intuition was the
+ starting point, while logical treatment followed\mdots The question in
+ all such cases, to use the language of analysis, is one of
+ interpolation, in which less stress is laid on exactness in particular
+ details than on a consideration of the general conditions\mdots Logical
+ investigation is not in place until intuition has completed the task of
+ idealization\mdots
+
+ \index{Gymnasium}
+ \index{instructor}
+ \index{university}
+ \index{professor}
+ I must add a few words on mathematics from the point of view of
+ pedagogy. We observe in Germany at the present day a very remarkable
+ condition of affairs in this respect; two opposing currents run side
+ by side without affecting one another appreciably. Among instructors
+ in our Gymnasia [that is, roughly as understood in North American
+ terms, in Germany's elite, preparatory high schools] the need of
+ mathematical instruction based on intuitive methods has now been so
+ strongly and universally emphasized that one is compelled to enter a
+ protest, and vigorously insist on the necessity for strict logical
+ treatment\mdots Among the university professors of our subject exactly
+ the reverse is the case; intuition is frequently not only undervalued,
+ but as much as possible ignored. This is doubtless a consequence of
+ the intrinsic importance of the arithmetizing tendency in modern
+ mathematics. But the result reaches far beyond the mark. It is high
+ time to assert openly once for all that this implies, not only a false
+ pedagogy, but also a distorted view of the science. I \ldots\ have
+ always discouraged the laying-down of general rules for higher
+ mathematical teaching, but this shall not prevent me from saying that
+ two classes at least of mathematical lectures must be based on
+ intuition; the elementary lectures which actually introduce the
+ beginner to higher mathematics---for the scholar must naturally follow
+ the same course of development on a smaller scale, that the science
+ itself has taken on a larger---and the lectures which are intended for
+ those whose work is largely done by intuitive methods, namely, natural
+ scientists and engineers. Through this one-sided adherence to logical
+ form we have lost among these classes of men much of the prestige
+ properly belonging to mathematics, and it is a pressing and urgent
+ duty to regain this prestige by judicious treatment.
+
+ \index{botanist}
+ \index{tree}
+ \index{branch}
+ \index{root}
+ To return to theoretical considerations, the general views which I
+ uphold in regard to the present problems of mathematical
+ science need scarcely be specially formulated. While I desire in
+ every case the fullest logical working out of the material, yet I
+ demand at the same time an intuitive grasp and investigation of the
+ subject from all sides. Mathematical developments originating in
+ intuition must not be considered actual constituents of the science
+ till they have been brought into a strictly logical form. Conversely,
+ the mere abstract statements of logical relations cannot satisfy us
+ until the extent of their application to every branch of intuition is
+ vividly set forth, and we recognize the manifold connections of the
+ logical scheme, depending on the branch which we have chosen, to the
+ other divisions of our knowledge. The science of mathematics may be
+ compared to a tree thrusting its roots deeper into the earth and
+ freely spreading out its shady branches into the air. Are we to
+ consider the roots or the branches as the essential part? Botanists
+ tell us that the question is badly framed, and that the life of the
+ organism depends on the mutual interaction of its different
+ parts.~\cite{Klein}
+\end{quotation}
+I entertain no illusions that the book whose end you have reached
+measures up to Klein's high standard---nor is the book a product of the
+mathematical profession in any case, so Klein's standard was never one
+the book precisely sought to meet---yet more than a century after Klein
+spoke, I can still think of no more fitting way to end the book than
+with Klein's robust reflections. During quiet moments, when the applied
+mathematician is not out throwing bridges across chasms and such, he may
+well ponder that which Klein has taught.
+
+\nopagebreak
+
+\noindent\\
+THB
+
diff pruN 0.53.201204142/tex/cubic.tex 0.56.20180123.12/tex/cubic.tex
 0.53.201204142/tex/cubic.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/cubic.tex 20180115 00:01:08.000000000 +0000
@@ 13,7 +13,7 @@ heavy lifting of the afternoon, one like
rest a spell in the shade. Chapters~\ref{alggeo} through~\ref{inttx}
have established the applied mathematical foundations upon which coming
chapters will build; and
Ch.~\ref{matrix},
+chapter~\ref{matrix},
hefting the weighty topic of the matrix, will indeed begin to build on
those foundations. But in this short chapter which rests between, we
shall refresh ourselves with an interesting but lighter mathematical
@@ 41,8 +41,8 @@ like~(\ref{alggeo:240}), and as we have
can occasionally fail to converge. One would prefer an actual formula
to extract the roots.
\index{Cardano, Girolamo
 (also known as Cardanus or Cardan, 15011576)}
+\index{Cardan, Girolamo
+ (also known as Cardano or Cardanus, 15011576)}
\index{Tartaglia, Niccol\`o Fontana (14991557)}
\index{Ferrari, Lodovico (15221565)}
\index{Vieta, Franciscus (Fran\c cois Vi\`ete, 15401603)}
@@ 51,7 +51,7 @@ to extract the roots.
No general formula to extract the roots of the $n$thorder
polynomial seems to be known.%
\footnote{
 Refer to Ch.~\ref{noth}'s footnote~\ref{noth:320:fn20}.
+ Refer to chapter~\ref{noth}'s footnote~\ref{noth:320:fn20}.
}
However, to extract the roots of the \emph{cubic} and \emph{quartic}
polynomials
@@ 60,7 +60,7 @@ polynomials
z^4 + a_3z^3 + a_2z^2 + a_1z + a_0, &&
\eqb
though the ancients never discovered how, formulas do exist.
The 16thcentury algebraists Ferrari, Vieta, Tartaglia and Cardano
+The 16thcentury algebraists Ferrari, Vieta, Tartaglia and Cardan
have given us the clever technique. This chapter explains.%
\footnote{%
\cite[``Cubic equation'']{EWW}%
@@ 71,25 +71,34 @@ have given us the clever technique. Thi
\cite[\S~1.5]{SRW}
}
% There is a controversy regarding Cardano, as to whether he plagiarized
+% There is a controversy regarding Cardan, as to whether he plagiarized
% his most celebrated results. The author does not know the facts of it
% but feels some unaccountable suspicion regarding the accusation. Why
% we should believe the accuser and not the accused in this case
% remains unclear to the author, whose intuition warns him to suspect
% magnification of the controversy by someone who likes to magnify. (If
% Tartaglia really did tell Cardano his secrets and then swear Cardano
+% Tartaglia really did tell Cardan his secrets and then swear Cardan
% to secrecy, we have only Tartaglia's putative word for it as far as
% the author knows. Besides, swearing someone to secrecy? What is
% Tartaglia supposed to have been, a wouldbe member of the ancient
% Pythagorean cult? If so, that's a bit weird. Why didn't Tartaglia
% just publish his formula, if he was so worried about losing credit for
% it, eh? Well, the author wonders if Tartaglia and Cardano weren't
+% it, eh? Well, the author wonders if Tartaglia and Cardan weren't
% both pretty normal, in fact, and if the story of their conflict hasn't
% received someshall we saydramatic embellishment.) The author
% does not wish to be unnecessarily credulous, nor is he really
% interested in researching the matter; so, in this book, Cardano is
+% interested in researching the matter; so, in this book, Cardan is
% treated as innocent until proven guilty. Tartaglia, too.
%
+% (Some time after the writer wrote the last paragraph, he encountered
+% Charles C. Pinter's sensational tale of Cardan and Tartaglia. Pinter
+% explains that scholarship in Cardan's day was not as scholarship
+% today, and that Cardan had competed against at least one other
+% algebraist for a prize or wager, using secret techniques. Pinter's
+% story is new to the writer, who has no particular reason to doubt it.
+% If the story is accurate, it does make Cardanconspiracy theories seem
+% at least a little more probable.)
+%
% Well, enough of that. Back to the book.
% 
@@ 129,7 +138,7 @@ true. For $\leftw\right \ll \leftw_o
The constant~$w_o$ is the \emph{corner value,} in the neighborhood of
which~$w$ transitions from the one domain to the other.
Figure~\ref{cubic:200:Vietafig} plots Vieta's transform for real~$w$ in
the case $w_o=1$.
+the case that $w_o=1$.
\begin{figure}
\caption[Vieta's transform, plotted logarithmically.]
{Vieta's transform~(\ref{cubic:200:10}) for $w_o=1$, plotted
@@ 279,19 +288,21 @@ should like to improve the notation by d
Q &\la +\frac{q}{2},
\end{split}
\eq
with which~(\ref{cubic:220:25}) and~(\ref{cubic:220:45}) are written
+with which~(\ref{cubic:220:25}) and~(\ref{cubic:220:45}) are written,
\bqa
x^3 &=& 2Q  3Px, \label{cubic:220:55} \\
(w^3)^2 &=& 2Qw^3 + P^3. \label{cubic:220:58}
\eqa
Table~\ref{cubic:cubictable} summarizes the complete cubic polynomial
root extraction meth\od in the revised notationincluding a few fine
+Table~\ref{cubic:cubictable} summarizes the complete cubicpolynomial
+rootextraction meth\od\footnote{\cite[eqn.~5.3]{Spiegel/Liu}}
+in the revised notationincluding a few fine
points regarding superfluous roots and edge cases, treated in
\S\S~\ref{cubic:235} and~\ref{cubic:240} below.
\begin{table}
\caption[A method to extract the three roots of the general cubic.]
{A method to extract the three roots of the general cubic polynomial.
 (In the definition of~$w^3$, one can choose either sign.)}
+ (In the definition of~$w^3$, one can choose either sign for
+ the~$\pm$.)}
\label{cubic:cubictable}
\index{cubic expression!roots of}
\index{root extraction!from a cubic polynomial}
@@ 349,8 +360,8 @@ distinct~$x$?
To prove that it does, let us suppose that it did not. Let us suppose
that a single~$w^3$ did generate two~$w$ which led to the same~$x$.
Letting the symbol~$w_1$ represent the third~$w$, then (since all
three~$w$ come from the same~$w^3$) the two~$w$ are $e^{+i2\pi/3}w_1$
and $e^{i2\pi/3}w_1$. Because $x \equiv w  P/w$, by successive steps,
+three~$w$ come from the same~$w^3$) the two~$w$ are $w=e^{\pm
+i2\pi/3}w_1$. Because $x \equiv w  P/w$, by successive steps,
\bqb
e^{+i2\pi/3}w_1  \frac{P}{e^{+i2\pi/3}w_1} &=& e^{i2\pi/3}w_1  \frac{P}{e^{i2\pi/3}w_1}, \\
e^{+i2\pi/3}w_1 + \frac{P}{e^{i2\pi/3}w_1} &=& e^{i2\pi/3}w_1 + \frac{P}{e^{+i2\pi/3}w_1}, \\
@@ 411,27 +422,28 @@ considered.} One can choose either sign
The one sign alone yields all three roots of the general cubic
polynomial.
In calculating the three~$w$ from~$w^3$, one can apply the
+To calculate the three~$w$ from~$w^3$, one can apply the
NewtonRaphson iteration~(\ref{drvtv:270:35}), the Taylor series of
Table~\ref{taylor:315:tbl}, or any other convenient rootfinding
technique to find a single root~$w_1$ such that $w_1^3 = w^3$. Then the
other two roots come easier. They are $e^{\pm i2\pi/3}w_1$; but $e^{\pm
+technique to find a single root~$w_1$ such that $w_1^3 = w^3$. The
+other two roots then come easier. They are $e^{\pm i2\pi/3}w_1$; but $e^{\pm
i2\pi/3} = (1 \pm i\sqrt 3)/2$, so
\bq{cubic:235:60}
w = w_1, \frac{1 \pm i\sqrt 3}{2} w_1.
\eq
\index{double root}
\index{root!double}
We should observe, incidentally, that nothing prevents two actual roots
of a cubic polynomial from having the same value. This certainly is
possible, and it does not mean that one of the two roots is superfluous
or that the polynomial has fewer than three roots. For example, the
cubic polynomial $(z1)(z1)(z2) = z^3  4z^2 + 5z  2$ has roots
at~$1$, $1$ and~$2$, with a single root at $z=2$ and a double
rootthat is, two rootsat $z=1$. When this happens, the method of
Table~\ref{cubic:cubictable} properly yields the single root once and
the double root twice, just as it ought to do.
+% This paragraph is probably unneeded.
+%\index{double root}
+%\index{root!double}
+%We should observe, incidentally, that nothing prevents two actual roots
+%of a cubic polynomial from having the same value. This certainly is
+%possible, and it does not mean that one of the two roots is superfluous
+%or that the polynomial has fewer than three roots. For example, the
+%cubic polynomial $(z1)(z1)(z2) = z^3  4z^2 + 5z  2$ has roots
+%at~$1$, $1$ and~$2$, with a single root at $z=2$ and a double
+%rootthat is, two rootsat $z=1$. When this happens, the method of
+%Table~\ref{cubic:cubictable} properly yields the single root once and
+%the double root twice, just as it ought to do.
% 
@@ 439,7 +451,7 @@ the double root twice, just as it ought
\label{cubic:240}
\index{edge case}
Section~\ref{cubic:235} excepts the edge cases $P=0$ and $P^3=Q^2$.
+Section~\ref{cubic:235} excepts the edge cases $P=0$ and $P^3=Q^2$\@.
Mostly the book does not worry much about edge cases, but the effects
of these cubic edge cases seem sufficiently nonobvious that the
book might include here a few words about them, if for no other
@@ 461,18 +473,31 @@ Both edge cases are interesting. In thi
first the edge cases themselves, then their effect on the proof of
\S~\ref{cubic:235}.
The edge case $P=0$, like the general nonedge case, gives two distinct
quadratic solutions~$w^3$. One of the two however is $w^3=QQ=0$, which is
awkward in light of Table~\ref{cubic:cubictable}'s definition that
$x \equiv wP/w$. For this reason, in applying the table's method when
$P=0$, one chooses the other quadratic solution, $w^3 = Q + Q = 2Q$.
+The edge case $P=0$, $Q\neq 0$, like the general nonedge case, gives
+two distinct quadratic solutions~$w^3$. One of the two however is
+$w^3=QQ=0$, which is awkward in light of
+Table~\ref{cubic:cubictable}'s definition that $x \equiv wP/w$. For
+this reason, in applying the table's method when $P=0$, one chooses the
+other quadratic solution, $w^3 = Q + Q = 2Q$.
+(A reader who wishes to take extra care of the logic might here ask how
+one can be entirely sure that $w^3=0$ is not the~$w^3$ we want to use
+despite that $x \equiv wP/w$. More than one answer to this concern
+could be given. One answer would be that the fundamental theorem of
+algebra, \S~\ref{noth:320.30}, implies three finite roots; so,
+since~$w^3=0$ can supply none of the three, it must be that $w^3=2Q$
+supplies all of them. A different answer is given later in the
+section.)
The edge case $P^3=Q^2$ gives only the one quadratic solution
$w^3=Q$; or more precisely, it gives two quadratic solutions which
+The edge case $P^3=Q^2\neq 0$ gives only the one quadratic solution
+$w^3=Q$; or, more precisely, it gives two quadratic solutions which
happen to have the same value. This is fine. One merely accepts
that $w^3=Q$, and does not worry about choosing one~$w^3$ over the
+that $w^3=Q$ and does not worry about choosing one~$w^3$ over the
other.
+Neither edge case yields more than one, distinct, usable value
+for~$w^3$, evidently. It would seem that the two edge cases were not
+troubled by the superfluous roots of \S~\ref{cubic:235}.
+
\index{triple root}
\index{root!triple}
\index{corner case}
@@ 484,6 +509,16 @@ to~(\ref{cubic:220:55}), $x^3 = 2Q  3Px
$x^3 = 0$ and thus that $x = 0$ absolutely, no other~$x$ being possible.
This implies the triple root $z=a_2/3$.
+\index{double root}
+\index{root!double}
+And how about merely \emph{double} roots? Section~\ref{cubic:235} has
+already shown that double roots cannot arise in nonedge cases. One
+can conclude that all cases of double roots are edge cases. (To
+identify to which of the two edge cases a double root corresponds is
+left as an exercise to the interested reader.\footnote{%
+ The writer has not had cause to investigate the matter.%
+})
+
Section~\ref{cubic:235} has excluded the edge cases from its proof of
the sufficiency of a single~$w^3$. Let us now add the edge cases to the
proof. In the edge case $P^3=Q^2$, both~$w^3$ are the same, so the
@@ 536,12 +571,12 @@ quartic reduction is actually the simple
remained still unknown, this writer does not know, but one supposes
that it might make an interesting story.
 The reason the quartic is simpler to reduce is probably related to the
+ The reason the quartic is simpler to reduce is perhaps related to the
fact that $(1)^{1/4} = \pm 1, \pm i$, whereas $(1)^{1/3} = 1, (1 \pm
i\sqrt 3)/2$. The $(1)^{1/4}$ brings a much neater result, the roots
lying nicely along the Argand axes. This may also be why the quintic
is intractablebut here we trespass the professional mathematician's
 territory and stray from the scope of this book. See Ch.~\ref{noth}'s
+ territory and stray from the scope of this book. See chapter~\ref{noth}'s
footnote~\ref{noth:320:fn20}.
}
@@ 666,8 +701,8 @@ In view of~(\ref{cubic:250:50}), the cha
\end{split}
\eq
improves the notation. Using the improved notation,
Table~\ref{cubic:quartictable} summarizes the complete quartic
polynomial root extraction method.
+Table~\ref{cubic:quartictable} summarizes the complete
+quarticpolynomial rootextraction method.
\begin{table}
\caption[A method to extract the four roots of the general quartic.]
{A method to extract the four roots of the general quartic
@@ 775,7 +810,7 @@ seen%
pages on this.
}
by writing $z=p/q$where~$p,q \in \mathbb Z$ are integers and the
fraction $p/q$ is fully reducedthen multiplying the $n$thorder
+fraction $p/q$ is fully reducedand then multiplying the $n$thorder
polynomial by~$q^n$ to reach the form
\[
a_np^n + a_{n1}p^{n1}q + \cdots + a_1pq^{n1} + a_0q^n = 0,
diff pruN 0.53.201204142/tex/diff.txt 0.56.20180123.12/tex/diff.txt
 0.53.201204142/tex/diff.txt 19700101 00:00:00.000000000 +0000
+++ 0.56.20180123.12/tex/diff.txt 20180113 13:52:03.000000000 +0000
@@ 0,0 +1,72 @@
+ ../../derivations0.55.20170612/tex/vcalc.tex 20170602 22:55:37.000000000 +0000
++++ ../../derivations0.55.20170613/tex/vcalc.tex 20170619 01:24:42.857784778 +0000
+@@ 251,9 +251,9 @@
+ makes the~$\nabla$ operator useful.
+
+ If~$\nabla$ takes the place of the ambiguous $d/d\ve r$, then what takes
+the place of the ambiguous $d/d\ve r'$, $d/d\ve r_o$, $d/d\tilde{\ve
+r}$, $d/d\ve r^\dagger$ and so on? Answer:~$\nabla'$, $\nabla_{\!o}$,
+$\tilde{\nabla}$, $\nabla^\dagger$ and so on. Whatever mark
++the place of the ambiguous $d/d\ve r_o$, $d/d\tilde{\ve
++r}$, $d/d\ve r^\dagger$, $d/d\ve r'$ and so on? Answer:~$\nabla_{\!o}$,
++$\tilde{\nabla}$, $\nabla^\dagger$, $\nabla'$ and so on. Whatever mark
+ distinguishes the special~$\ve r$, the same mark distinguishes the
+ corresponding special~$\nabla$. For example, where $\ve r_o = \vui
+ i_o$, there $\nabla_{\!o} = \vui\,\partial/\partial i_o$. That is the
+@@ 271,9 +271,11 @@
+ written. Refer to \S~\ref{vector:240}.
+ }
+
++\index{Hamilton, William Rowan (18051865)}
+ \index{Heaviside, Oliver (18501925)}
+Introduced by Oliver Heaviside, informally pronounced ``del'' (in the
+author's country at least), the vector differential operator~$\nabla$
++Introduced by William Rowan Hamilton and Oliver Heaviside, informally
++pronounced ``del'' (in the author's country at least), the vector
++differential operator~$\nabla$
+ finds extensive use in the modeling of physical phenomena. After a
+ brief digression to discuss operator notation, the subsections that
+ follow will use the operator to develop and present the four basic kinds
+@@ 496,7 +498,7 @@
+ }%
+ what matters is not the surface's area as such but rather the area
+ the surface presents to the flow. The surface presents its full area to
+a perpendicular flow, but otherwise the flow sees a foreshortened
++a perpendicular flow but otherwise the flow sees a foreshortened
+ surface, \emph{as though the surface were projected onto a plane
+ perpendicular to the flow.} Refer to Fig.~\ref{vector:220:figdot}.
+ Now realize that eqn.~\ref{vcalc:flux} actually describes flux not
+@@ 962,7 +964,7 @@
+ If~(\ref{vcalc:divthm}) is ``the divergence theorem,'' then
+ should~(\ref{vcalc:stokesthm}) not be ``the curl theorem''?
+ Answer: maybe it should be, but no one calls it that. Sir George
+ Gabriel Stokes evidently is not to be denied his fame!
++ Gabriel Stokes is evidently not to be denied his fame!
+ }
+ neatly relating the directional curl over a (possibly nonplanar) surface
+ to the circulation about it. Like the divergence
+@@ 1157,7 +1159,7 @@
+ themselves invariant. That the definitions and identities at the top of
+ the table are invariant, we have already seen; and \S~\ref{vcalc:350},
+ next, will give invariance to the definitions and identities at the
+bottom. The whole table therefore is invariant under rotation of axes.
++bottom. The whole table is therefore invariant under rotation of axes.
+
+ % 
+
+@@ 1177,9 +1179,13 @@
+ Like vector products and firstorder vector derivatives, secondorder
+ vector derivatives too come in several kinds, the simplest of which is
+ the \emph{Laplacian}%
+\footnote{
++\footnote{%
++ % diagn: new footnote; review
+ Though seldom seen in applied usage in the author's country, the
+ alternate symbol~$\Delta$ replaces~$\nabla^2$ in some books.
++ alternate symbol~$\Delta$ replaces~$\nabla^2$ in some books,
++ especially some British books. The author prefers the~$\nabla^2$,
++ which better captures the sense of the thing and which leaves~$\Delta$
++ free for other uses.%
+ }
+ \bq{vcalc:laplacian}
+ \begin{split}
diff pruN 0.53.201204142/tex/drvtv.tex 0.56.20180123.12/tex/drvtv.tex
 0.53.201204142/tex/drvtv.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/drvtv.tex 20180116 13:27:42.000000000 +0000
@@ 1,6 +1,6 @@
% 
\chapter{The derivative}
+\chapter{The derivative and its incidents}
\label{drvtv}
\index{calculus}
\index{calculus!the two complementary questions of}
@@ 14,29 +14,28 @@ The mathematics of \emph{calculus} conce
questions:%
\footnote{
Although once grasped the concept is relatively simple, to understand
 this pair of questions, so briefly stated, is no trivial thing. They
 are the pair which eluded or confounded the most brilliant
+ this pair of questionsso briefly statedis no trivial thing.
+ They are the pair which eluded or confounded the most brilliant
mathematical minds of the ancient world.
The greatest conceptual hurdlethe stroke of brillianceprobably
 lies simply in stating the pair of questions clearly. Sir Isaac
 Newton and G.W.~Leibnitz cleared this hurdle for us in the
+ lies in simply stating the pair of questions clearly. Sir Isaac
+ Newton and G.~W.\ Leibnitz cleared this hurdle for us in the
seventeenth century, so now at least we know the right pair of
questions to ask. With the pair in hand, the calculus beginner's
first task is quantitatively to understand the pair's
interrelationship, generality and significance. Such an understanding
constitutes the basic calculus concept.
 It cannot be the role of a book like this one to lead the beginner
 gently toward an apprehension of the basic calculus concept. Once
 grasped, the concept is simple and briefly stated. In this book we
 necessarily state the concept briefly, then move along.
 Many instructional textbooks\cite{Hamming} is a worthy
 examplehave been written to lead the beginner gently. Although a
 sufficiently talented, dedicated beginner could perhaps obtain the
 basic calculus concept directly here, he would probably find it
 quicker and more pleasant to begin with a book like the one
 referenced.
+ It cannot be the role of a book like this to lead the beginner gently
+ toward an apprehension of the basic calculus concept. Once grasped,
+ the concept is simple and briefly stated. Therefore, in this book, we
+ shall take the concept as simple, briefly state it, and then move along;
+ whereas, of course, you would find it hard first to learn calculus
+ like that.
+
+ Many instructional textbooks have been written to lead the beginner
+ gently. Worthy examples of such a textbook include~\cite{Hamming}.
}
\bi
\item Given some function $f(t)$, what is the function's instantaneous
@@ 65,7 +64,7 @@ A number~$\ep$ is an \emph{infinitesimal
\[
0 < \left\ep\right < a
\]
for all possible mundane positive numbers~$a$.
+for all possible, mundane positive numbers~$a$.
This is somewhat a difficult concept, so if it is not immediately clear
then let us approach the matter colloquially. Let me propose to you
@@ 112,7 +111,40 @@ the reach of our mundane number system.
If~$\ep$ is an infinitesimal, then $1/\ep$ can be regarded as an
\emph{infinity:} a very large number much larger than any mundane
number one can name.
+number one can name.\footnote{\label{drvtv:210.001fn}%
+ %\index{Berkeley, George (16851753), Bishop of Cloyne}
+ %\index{Robinson, Abraham (19181974)}
+ Some professional mathematicians have deprecated talk of this sort.
+ However, their reasons are abstruse and appear to bear little on
+ applications. See \S~\ref{intro:284}.
+
+ The literature in this and related matters is vast, deep and
+ inconclusive. \hspace{\stretch{1}} It includes
+ % superdiagn: bad break, or bad spacing. The several \stretch below
+ % and just above should be unnecessary
+ \cite{Berkeley}\hspace{\stretch{1}}%
+ \cite{Courant/Hilbert}\hspace{\stretch{1}}%
+ \cite{Dasgupta}\hspace{\stretch{1}}%
+ \cite{Fisher}\hspace{\stretch{1}}%
+ \cite{Frege}\hspace{\stretch{1}}%
+ \cite{Hardy}\hspace{\stretch{1}}%
+ \cite{Hersh}\hspace{\stretch{1}}%
+ \cite{Hershbook}\hspace{\stretch{1}}%
+ \cite{Hilbert:geometrie}\hspace{\stretch{1}}%
+ \cite{Pinter}\hspace{\stretch{1}}%
+ \cite{Robinson}\hspace{\stretch{1}}%
+ \cite{Russell:1903}\hspace{\stretch{1}}%
+ \cite{Shilov}\hspace{\stretch{1}}%
+ \cite{Sieg/Schlimm}\hspace{\stretch{1}}%
+ \cite{Toader}\hspace{\stretch{1}}%
+ \cite{Tymoczko}\hspace{\stretch{1}}%
+ \cite{Weierstrass:werke}\hspace{\stretch{1}}%
+ \cite{Weiner}\hspace{\stretch{1}}%
+ \cite{Weyl:1918}\hspace{\stretch{1}}%
+ \cite{Weyl:1950}\hspace{\stretch{1}}%
+ \cite{Wilson}
+ among others.%
+}
\index{$\delta$}
\index{$\epsilon$}
@@ 125,22 +157,22 @@ infinitesimals is in that it permits us
infinitesimal against another, to add them together, to divide them,
etc. For instance, if $\delta = 3\ep$ is another infinitesimal, then
the quotient $\delta/\ep$ is not some unfathomable~$0/0$; rather it is
$\delta/\ep = 3$. In physical applications, the infinitesimals are
often not true mathematical infinitesimals but rather relatively very
+$\delta/\ep = 3$. In physical applications, the infinitesimals often
+are not true mathematical infinitesimals but rather relatively very
small quantities such as the mass of a wood screw compared to the mass
of a wooden house frame, or the audio power of your voice compared to
that of a jet engine. The additional cost of inviting one more guest to
the wedding may or may not be infinitesimal, depending on your point of
+the wedding may or may not be infinitesimal, depending upon your point of
view. The key point is that the infinitesimal quantity be negligible by
comparison, whatever ``negligible'' means in the context.%
+comparison, whatever ``negligible'' might mean in the context.%
\footnote{
Among scientists and engineers who study wave phenomena, there is an
old rule of thumb that sinusoidal waveforms be discretized not less
finely than ten points per wavelength. In keeping with this book's
 adecimal theme (Appendix~\ref{hex}) and the concept of the hour of arc
+ adecimal theme (appendix~\ref{hex}) and the concept of the hour of arc
(\S~\ref{trig:260}), we should probably render the rule as
\emph{twelve} points per wavelength here. In any case, even very
 roughly speaking, a quantity greater then $1/\mbox{0xC}$ of the
+ roughly speaking, a quantity greater than $1/\mbox{0xC}$ of the
principal to which it compares probably cannot rightly be regarded as
infinitesimal. On the other hand, a quantity less than
$1/\mbox{0x10000}$ of the principal is indeed infinitesimal for most
@@ 151,10 +183,11 @@ comparison, whatever ``negligible'' mean
}
\index{infinitesimal!second and higherorder}
The secondorder infinitesimal~$\ep^2$ is so small on the scale of the
common, firstorder infinitesimal~$\ep$ that the even latter cannot
measure it. The~$\ep^2$ is an infinitesimal to the infinitesimals.
Third and higherorder infinitesimals are likewise possible.
+The secondorder infinitesimal~$\ep^2=(\ep)(\ep)$ is so small on the
+scale of the common, firstorder infinitesimal~$\ep$ that the even
+latter cannot measure it. The~$\ep^2$ is an infinitesimal to the
+infinitesimals. Third and higherorder infinitesimals likewise are
+possible.
\index{$\ll$ and~$\gg$}
The notation $u \ll v$, or $v \gg u$, indicates that~$u$ is much less than~$v$,
@@ 169,34 +202,38 @@ infinitesimal is to write that $\ep \ll
The notation
$\lim_{z\rightarrow z_o}$
indicates that~$z$ draws as near to~$z_o$ as it possibly can.
When written
+When written as
$\lim_{z\rightarrow z_o^+}$,
the implication is that~$z$ draws toward~$z_o$ from the positive side
such that $z > z_o$. Similarly, when written
+such that $z > z_o$. Similarly, when written as
$\lim_{z\rightarrow z_o^}$,
the implication is that~$z$ draws toward~$z_o$ from the negative side.
The reason for the notation is to provide a way to handle expressions
like
\[
 \frac{3z}{2z}
\]
as~$z$ vanishes:
\[
 \lim_{z\rightarrow 0}\frac{3z}{2z} = \frac{3}{2}.
\]
The symbol ``$\lim_Q$'' is short for ``in the limit as~$Q$.''

Notice that $\lim$ is not a function like $\log$ or $\sin$. It is
just a reminder that a quantity approaches some value, used when
saying that the quantity \emph{equaled} the value would be confusing.
Consider that to say
\[
 \lim_{z\rightarrow 2^} ( z + 2 ) = 4
\]
is just a fancy way of saying that $2+2=4.$ The $\lim$ notation is
convenient to use sometimes, but it is not magical. Don't let it
confuse you.
+\index{}
+The reason for the notation is to provide an orderly way to handle
+expressions like
+\[
+ \frac{f(z)}{g(z)}
+\]
+as~$z$ vanishes or approaches some otherwise difficult value. For
+example, if $f(z)\equiv 3z+5z^3$ and $g(z)\equiv 2z+z^2$, then
+\[
+ \lim_{z\rightarrow 0}\frac{f(z)}{g(z)}
+ = \lim_{z\rightarrow 0}\frac{3z+5z^3}{2z+z^2}
+ = \lim_{z\rightarrow 0}\frac{3+5z^2}{2+z}
+ = \frac{3+0}{2+0}
+ = \frac{3}{2},
+\]
+which is preferable to writing na\"ively that $f(z)/g(z)_{z=0} =
+0/0$ (the ``$_{z=0}$'' meaning, ``given that, or evaluated when,
+$z=0$''). The symbol ``$\lim_Q$'' is short for ``in the limit as~$Q$,''
+so ``$\lim_{z\rightarrow 0}$'' says, ``in the limit as~$z$
+approaches~0.''
+
+Observe that $\lim$ is not a function like $\log$ or $\sin$. Rather, it
+is a mere reminder. It is a reminder that a quantity like~$z$ approaches
+some value, used when saying that the quantity \emph{equaled}
+the value would be inconvenient or confusing.
% 
@@ 205,8 +242,8 @@ confuse you.
\index{combinatorics}
In its general form, the problem of selecting~$k$ specific items out of
a set of~$n$ available items belongs to probability theory (Ch.~\ref{prob}).
In its basic form, however, the same problem also applies to the
+a set of~$n$ available items belongs to probability theory (chapter~\ref{prob}).
+In its basic form however, the same problem also applies to the
handling of polynomials or power series. This section treats the
problem in its basic form.%
\footnote{\cite{Hamming}}
@@ 220,16 +257,17 @@ problem in its basic form.%
\index{block, wooden}
\index{wooden block}
Consider the following scenario. I have several small
+Consider the following scenario. I have several small,
wooden blocks of various shapes and sizes, painted different colors so
that you can clearly tell each block from the others. If I offer you
+that you can readily tell each block from the others. If I offer you
the blocks and you are free to take all, some or none of them at your
option, if you can take whichever blocks you want, then how many
distinct choices of blocks do you have? Answer: you have~$2^n$ choices,
because you can accept or reject the first block, then accept or reject
the second, then the third, and so on.
+option, if you can take whichever blocks you like, then how many
+distinct choices of blocks confront you? Answer: the symbol~$n$
+representing the number of blocks I have, a total of~$2^n$ distinct
+choices confront you, for you can accept or reject the first block, then
+accept or reject the second, then the third, and so on.
Now, suppose that what you want is exactly~$k$ blocks, neither more nor
+Now, suppose that what you want are exactly~$k$ blocks, neither more nor
fewer. Desiring exactly~$k$ blocks, you select your favorite block
first: there are~$n$ options for this. Then you select your second
favorite: for this, there are $n1$ options (why not~$n$ options?
@@ 243,7 +281,7 @@ evidently
ordered ways, or \emph{permutations,} available for you to select
exactly~$k$ blocks.
However, some of these distinct permutations put exactly
+However, some of these distinct permutations would put exactly
the same \emph{combination} of blocks in your hand; for instance,
the permutations redgreenblue and greenredblue constitute the same
combination, whereas redwhiteblue is a different combination
@@ 255,43 +293,99 @@ evidently~$k!$ permutations are possible
greenbluered,
blueredgreen,
bluegreenred%
). Hence dividing the number of permutations~(\ref{drvtv:220:20}) by~$k!$
yields the number of combinations
+). Thus, dividing the number of permutations~(\ref{drvtv:220:20})
+by~$k!$ yields the number of combinations
\bq{drvtv:220:30}
\cmb{n}{k} \equiv \frac{n!/(nk)!}{k!}.
\eq
\index{combination!properties of}
+\index{combinatorics!properties of}
\index{Pascal's triangle!neighbors in}
Properties of the number {$\cmbl{n}{k}$} of combinations include that
\bqa
 \cmb{n}{nk} &=& \cmb{n}{k},\label{drvtv:220:31}\\
 \sum_{k=0}^{n} \cmb{n}{k} &=& 2^n,\label{drvtv:220:34}\\
 \cmb{n1}{k1} + \cmb{n1}{k} &=& \cmb{n}{k},\label{drvtv:220:37}\\
 \cmb{n}{k} &=& \frac{nk+1}{k}\cmb{n}{k1}\label{drvtv:220:41}\\
 &=& \frac{k+1}{nk}\cmb{n}{k+1}\label{drvtv:220:42}\\
 &=& \frac{n}{k}\cmb{n1}{k1}\label{drvtv:220:43}\\
 &=& \frac{n}{nk}\cmb{n1}{k}.\label{drvtv:220:44}
\eqa
Equation~(\ref{drvtv:220:31}) results from changing the variable
$k\la nk$ in~(\ref{drvtv:220:30}).
Equation~(\ref{drvtv:220:34}) comes directly from the observation (made
at the head of this section) that~$2^n$ total combinations are possible
if any~$k$ is allowed. Equation~(\ref{drvtv:220:37}) is seen when an
$n$th blocklet us say that it is a black blockis added to an
existing set of $n1$ blocks; to choose~$k$ blocks then, you can either
choose~$k$ from the original set, or the black block plus $k1$ from the
original set. Equations~(\ref{drvtv:220:41})
through~(\ref{drvtv:220:44}) come directly from the
+Table~\ref{drvtv:220:tbl} repeats the definitions~(\ref{drvtv:220:20})
+and~(\ref{drvtv:220:30}), and then proceeds to list several properties of
+the number {$\cmbl{n}{k}$} of combinations.
+\begin{table}
+ \caption{Combinatorical properties.}
+ \index{combination!properties of}
+ \index{combinatorics!properties of}
+ \label{drvtv:220:tbl}
+ \bc
+ \bqb
+ P\cmb{n}{k} &\equiv& n!/(nk)!\\
+ \cmb{n}{k} &\equiv& \frac{n!/(nk)!}{k!}
+ = \frac{1}{k!}P\cmb{n}{k}\\
+ &=& \cmb{n}{nk}\\
+ &=& \cmb{n1}{k1} + \cmb{n1}{k}\\
+ &=& \frac{nk+1}{k}\cmb{n}{k1}\\
+ &=& \frac{k+1}{nk}\cmb{n}{k+1}\\
+ &=& \frac{n}{k}\cmb{n1}{k1}\\
+ &=& \frac{n}{nk}\cmb{n1}{k}\\
+ \sum_{k=0}^{n} \cmb{n}{k} &=& 2^n
+ \eqb
+ \ec
+\end{table}
+Among the several properties, the property of the table's third line
+results from changing the variable $k\la nk$ in~(\ref{drvtv:220:30}).
+The property of the table's fourth line is seen when an $n$th
+blocklet us say that it is a black blockis added to an existing
+set of $n1$ blocks: to choose~$k$ blocks then, you can choose
+either~$k$ from the original set, or the black block plus $k1$ from the
+original set. The next four lines come directly from the
definition~(\ref{drvtv:220:30}); they relate combinatoric coefficients
to their neighbors in Pascal's triangle (\S~\ref{drvtv:220.30}).
+to their neighbors in Pascal's triangle (\S~\ref{drvtv:220.30}). The
+last line merely observes, again as at the head of this section,
+that~$2^n$ total combinations are possible if any~$k$ is allowed.
Because one can choose neither fewer than zero nor more than~$n$ from~$n$
blocks,
\bq{drvtv:220:48}
\cmb n k = 0 \ \ \mbox{unless}\ 0 \le k \le n.
\eq
For $\cmbl n k$ when $n<0$, there is no obvious definition.
+For $\cmbl n k$ when $n<0$, there is no obvious definition.\footnote{%
+ \index{Silverman, Richard~A.}%
+ \index{faux rigor}%
+ \index{rigor!faux}%
+ So, does that mean that $\cmbl n k$ is not allowed when $n<0$? Answer:
+ probably. After all, it seems hard to imagine how one could allow such a
+ quantity while retaining internal consistency within
+ Table~\ref{drvtv:220:tbl}, for a division by zero seems to be implied.
+ However, the question may not be the sort of question the
+ applied mathematician is even likely to ask. He is likely to ask, rather,
+ what $\cmbl n k$, $n<0$, would meanif anything\emph{in light of a
+ particular physical problem of interest.} Only once the latter question has
+ been answered will the applied mathematician consider whether or how to treat
+ the quantity.
+ %
+ % The below may be interesting but is not included in the book for now.
+ %
+ %The prospect of one single, absolute, formal logic to comprise all
+ %mathematical circumstances is deeply alluring to many mathematicians (though
+ %admittedly less deeply to this writer). However, a book of applied
+ %mathematics would not be the place to look for such a logic. Indeed, not
+ %even the professionals have discovered such a logic (see
+ %footnote~\ref{drvtv:210.001fn}, \S~\ref{intro:284.4}
+ %and~\cite[Preface]{Shilov}); and yet, even if they had, the applied
+ %mathematician would hardly think along such lines, anyway.
+ %
+ %The book includes several footnotes like the one you are
+ %reading. It includes them in part because, in the Western world in
+ %the writer's day, an irksome, indefinable social expectation has grown
+ %among us applied mathematicians that the applied mathematician should
+ %sporadically engage in what one might call ``faux rigor.'' The
+ %results of such engagement are
+ %almost always unsatisfactory. Such engagement clarifies nothing and
+ %incidentally, moreover, for what it's worth, as one suspects, leaves
+ %professional mathematicians unimpressed. Such engagement is species
+ %of cant. We should stop doing it. Many of us have stopped doing it.
+ %It does not help.
+ %
+ %During the Soviet era of the 20th century, Soviet mathematicians
+ %seemed to suffer less from the social problem in
+ %question, which may be the chief reason Englishspeaking engineers so valued
+ %Richard~A.\ Silverman's excellent series of translations (\cite{Lebedev}
+ %and~\cite{Shilov}, for instance) of mathematical books from the Russian.%
+}
\subsection{Pascal's triangle}
\label{drvtv:220.30}
@@ 307,35 +401,64 @@ possible $\cmbl{n}{k}$.
\[
\br{c}
\cmbl{0}{0} \\
 \cmbl{1}{0} \cmbl{1}{1} \\
 \cmbl{2}{0} \cmbl{2}{1} \cmbl{2}{2} \\
 \cmbl{3}{0} \cmbl{3}{1} \cmbl{3}{2} \cmbl{3}{3} \\
 \cmbl{4}{0} \cmbl{4}{1} \cmbl{4}{2} \cmbl{4}{3} \cmbl{4}{4} \\
+ \cmbl{1}{0}\cmbl{1}{1} \\
+ \cmbl{2}{0}\cmbl{2}{1}\cmbl{2}{2} \\
+ \cmbl{3}{0}\cmbl{3}{1}\cmbl{3}{2}\cmbl{3}{3} \\
+ \cmbl{4}{0}\cmbl{4}{1}\cmbl{4}{2}\cmbl{4}{3}\cmbl{4}{4} \\
+ \cmbl{5}{0}\cmbl{5}{1}\cmbl{5}{2}\cmbl{5}{3}\cmbl{5}{4}\cmbl{5}{5} \\
\vdots
\er
\]
\ec
\end{figure}
Evaluated, this yields Fig.~\ref{drvtv:pasc}, \emph{Pascal's triangle.}
Notice how each entry in the triangle is the sum of the two entries
immediately above, as~(\ref{drvtv:220:37}) predicts. (In fact this is
the easy way to fill Pascal's triangle out: for each entry, just add the
+% bad break
+Eval\u\ated, this yields Fig.~\ref{drvtv:pasc}, \emph{Pascal's triangle.}
+Notice that each entry in the triangle is the sum of the two entries
+immediately above, as Table~\ref{drvtv:220:tbl} predicts. (In fact, this is
+the easy way to fill out Pascal's triangle: for each entry, just add the
two entries above.)
\begin{figure}
\nc\di[1]{\makebox[\wdig][c]{#1}}
 \caption{Pascal's triangle.}
+ \caption{Pascal's triangle (in hexadecimal notation).}
\label{drvtv:pasc}
\bc
\[
\br{c}
1 \\
 1\ \ 1 \\
 1\ \ 2\ \ 1 \\
 1\ \ 3\ \ 3\ \ 1 \\
 1\ \ 4\ \ 6\ \ 4\ \ 1 \\
 1\ \ 5\ \ \di A\ \ \di A\ \ 5\ \ 1 \\
 1\ \ 6\ \ \di F\ \ \di{14}\ \ \di F\ \ 6\ \ 1 \\
 1\ \ 7\ \ \di{15}\ \ \di{23}\ \ \di{23}\ \ \di{15}\ \ 7\ \ 1 \\
+ 1\ \ \ 1 \\
+ 1\ \ \ 2\ \ \ 1 \\
+ 1\ \ \ 3\ \ \ 3\ \ \ 1 \\
+ 1\ \ \ 4\ \ \ 6\ \ \ 4\ \ \ 1 \\
+ 1\ \ \ 5\ \ \ \di A\ \ \ \di A\ \ \ 5\ \ \ 1 \\
+ 1\ \ \ 6\ \ \ \di F\ \ \ \di{14}\ \ \ \di F\ \ \ 6\ \ \ 1 \\
+ 1\ \ \ 7\ \ \ \di{15}\ \ \ \di{23}\ \ \ \di{23}\ \ \ \di{15}\ \ \ 7\ \ \ 1 \\
+ 1\ \ \ 8\ \ \ \di{1C}\ \ \ \di{38}\ \ \ \di{46}\ \ \ \di{38}\ \ \ \di{1C}\ \ \ 8\ \ \ 1 \\
+ 1\ \ \ 9\ \ \ \di{24}\ \ \ \di{54}\ \ \ \di{7E}\ \ \ \di{7E}\ \ \ \di{54}\ \ \ \di{24}\ \ \ 9\ \ \ 1 \\
+ \vdots
+ \er
+ \]
+ \ec
+\end{figure}
+\begin{figure}
+ \nc\di[1]{\makebox[\wdig][c]{#1}}
+ \caption{Pascal's triangle (in decimal notation).}
+ \label{drvtv:pascdec}
+ \setlength\tla{0.13em}
+ \newcommand\tta{0.6}
+ \bc
+ \[
+ \br{c}
+ 1 \\
+ 1\ \ \ 1 \\
+ 1\ \ \ 2\ \ \ 1 \\
+ 1\ \ \ 3\ \ \ 3\ \ \ 1 \\
+ 1\ \ \ 4\ \ \ 6\ \ \ 4\ \ \ 1 \\
+ 1\ \ \ 5\ \ \ \di{10}\ \ \ \di{10}\ \ \ 5\ \ \ 1 \\
+ 1\ \ \ 6\ \ \ \di{15}\ \ \ \di{20}\ \ \ \di{15}\ \ \ 6\ \ \ 1 \\
+ 1\ \ \ 7\ \ \ \di{21}\ \ \ \di{35}\ \ \ \di{35}\ \ \ \di{21}\ \ \ 7\ \ \ 1 \\
+ 1\ \ \ 8\ \ \ \di{28}\ \ \ \di{56}\ \ \ \di{70}\ \ \ \di{56}\ \ \ \di{28}\ \ \ 8\ \ \ 1 \\
+ 1\ \ \ 9\ \ \ \di{36}\ \ \ \di{\hspace{\tta\tla}84}\ \ \ \di{\hspace{\tla}126}%
+ \ \ \ \di{126\hspace{\tla}}\ \ \ \di{84\hspace{\tta\tla}}\ \ \ \di{36}\ \ \ 9\ \ \ 1 \\
\vdots
\er
\]
@@ 356,26 +479,27 @@ consequences.
The \emph{binomial theorem} holds that%
\footnote{
 % diagn: this revised footnote wants one last review.
The author is given to understand that, by an heroic derivational
 effort,~(\ref{drvtv:230:binth}) can be extended to nonintegral~$n$.
 However, since applied mathematics does not usually concern itself
 with hard theorems of little known practical use, the extension as
 such is not covered in this book. What is coveredin
 Table~\ref{taylor:315:tbl}is the Taylor series for $(1+z)^{a1}$
 for complex~$z$ and complex~$a$, which amounts to much the same thing.
+ effort,~(\ref{drvtv:230:binth}) can be extended directly to
+ nonintegral~$n$. However, we shall have no immediate need for such an
+ extension. Later, in Table~\ref{taylor:315:tbl}, we will compute
+ the Taylor series for $(1+z)^{a1}$, anyway, which indirectly amounts
+ to much the same thing as the extension, and has a more elegant form
+ to boot, and moreover (at least in the author's experience) arises
+ much more often in applications.%
}
\bq{drvtv:230:binth}
(a+b)^n = \sum_{k=0}^n \cmb{n}{k} a^{nk}b^k.
\eq
In the common case that $a=1$, $b=\ep$, $\left \ep \right \ll 1$,
this is
+this is that
\bq{drvtv:230:binthe}
(1+\ep)^n = \sum_{k=0}^n \cmb{n}{k} \ep^k
\eq
(actually this holds for any~$\ep$, small or large; but the typical case
of interest has $\ep \ll 1$). In either form, the binomial theorem is
a direct consequence of the combinatorics of \S~\ref{drvtv:220}. Since
+(actually, eqn.~\ref{drvtv:230:binthe} holds for any~$\ep$, small or
+large; but the typical case of interest has that $\ep \ll 1$). In
+either form, the binomial theorem is a direct consequence of the
+combinatorics of \S~\ref{drvtv:220}. Since
\[
(a+b)^n = (a+b)(a+b)\cdots(a+b)(a+b),
\]
@@ 401,8 +525,9 @@ that%
\[
1 + m\ep_o \approx (1+\ep_o)^m
\]
to excellent precision. Furthermore, raising the equation to the $1/m$
power then changing $\delta \la m\ep_o$, we have that
+to arbitrary precision as long as~$\ep_o$ is small enough.
+Furthermore, raising the equation to the $1/m$ power then changing
+$\delta \la m\ep_o$, we have that
\[
(1 + \delta)^{1/m} \approx 1+\frac{\delta}{m}.
\]
@@ 411,7 +536,7 @@ equation above that this implies that $\
\[
(1 + \ep)^{n/m} \approx 1+\frac{n}{m}\ep.
\]
Inverting this equation yields
+Inverting this equation yields that
\[
(1+\ep)^{n/m} \approx \frac{1}{1 + (n/m)\ep} =
\frac{[1(n/m)\ep]}{[1(n/m)\ep][1+(n/m)\ep]} \approx 1\frac{n}{m}\ep.
@@ 423,36 +548,67 @@ Taken together, the last two equations i
for any real~$x$.
\index{Taylor expansion, firstorder}
The writer knows of no conventional name%
\footnote{
 Actually, ``the firstorder Taylor expansion'' is a conventional name
 for it, but so unwieldy a name does not fit the present context.
 Ch.~\ref{taylor} will introduce the Taylor expansion as such.
}
for~(\ref{drvtv:230:apxex}), but named or unnamed it is an important
equation. The equation offers a simple, accurate way of approximating
any real power of numbers in the near neighborhood of~1.
+\index{firstorder Taylor expansion}
+Obscurely,~(\ref{drvtv:230:apxex}) is called the \emph{firstorder Taylor
+expansion.} The reason the equation is called by such an unwieldy name
+will be explained in chapter~\ref{taylor}, but howsoever the equation
+may be called, it is an important result. The equation offers a
+simple, accurate way of approximating any real power of numbers in the
+near neighborhood of~1.
\subsection{Complex powers of numbers near unity}
\label{drvtv:230.35}
\index{power!complex}
\index{complex power}
+\index{definition}
Equation~(\ref{drvtv:230:apxex}) is fine as far as it goes, but its very
form suggests the question: what if~$\ep$ or~$x$, or both, are complex?
+form suggests the question: what if~$\ep$ or~$x$, or both, is complex?
Changing the symbol $z\la x$ and observing that the infinitesimal~$\ep$
may also be complex, one wants to know whether
+also may be complex, one wants to know whether
\bq{drvtv:230:apxe}
(1+\ep)^z \approx 1 + z\ep
\eq
still holds. No work we have yet done in the book answers the question,
because although a complex infinitesimal~$\ep$ poses no particular
+because though a complex infinitesimal~$\ep$ poses no particular
problem, the action of a complex power~$z$ remains undefined. Still, for
consistency's sake, one would like~(\ref{drvtv:230:apxe}) to hold. In
fact nothing prevents us from defining the action of a complex power
+fact nothing prevents us from \emph{defining} the action of a complex power
such that~(\ref{drvtv:230:apxe}) does hold, which we now do, logically
extending the known result~(\ref{drvtv:230:apxex}) into the new domain.
+But we cannot just define that, can we? Surely we cannot glibly assert
+that ``nothing prevents us'' and then go to define whatever we like!
+
+Can we?
+
+Actually, yes, in this case we can. Consider that, insofar
+as~(\ref{drvtv:230:apxe}) holds,
+{
+ \newcommand\tta{\ensuremath(1+\ep)^{z_1+z_2}}
+ \newcommand\ttb{\ensuremath(1+\ep)^{z_1z_2}}
+ \settowidth\tla{$\tta$}
+ \bqb
+ \lefteqn{\makebox[\tla][l]{$\tta$}
+ = (1+\ep)^{z_1}(1+\ep)^{z_2} \approx (1+z_1\ep)(1+z_2\ep)}
+ &&\\&&\makebox[\tla][l]{}
+ = 1+z_1\ep+z_2\ep+z_1z_2\ep^2 \approx 1+(z_1+z_2)\ep;
+ \\
+ \lefteqn{\makebox[\tla][l]{$\ttb$} = \left[(1+\ep)^{z_1}\right]^{z_2}
+ \approx
+ [1+z_1\ep]^{z_2} \approx 1+z_1z_2\ep;}
+ \eqb
+}%
+and so on. These alone do not of course conclusively prove that our new
+definition is destined to behave well in every circumstance of future interest.
+Experience will tell. Notwithstanding, in the meantime, since we seem unable
+for the moment to identify a relevant circumstance in which our new definition
+misbehaves, since our definition does seem a natural extension
+of~(\ref{drvtv:230:apxex}), since it does not seem to contradict
+anything we already know, and since no obvious alternative presents
+itself, let us provisionally accept the definition and find out to what
+results it leads.
+
Section~\ref{cexp:230} will investigate the extremely interesting
effects which arise when $\Re(\ep)=0$ and the power~$z$
in~(\ref{drvtv:230:apxe}) grows large, but for the moment we shall use
@@ 469,28 +625,45 @@ application of the derivative, as follow
\index{derivative!balanced form}
\index{derivative!unbalanced form}
Having laid down~(\ref{drvtv:230:apxe}), we now stand in a position
properly to introduce the chapter's subject, the derivative. What is
the derivative? The \emph{derivative} is the instantaneous rate or
slope of a function. In mathematical symbols and for the moment using
real numbers,
+With~(\ref{drvtv:230:apxe}) at least provisionally in hand, we can now turn to
+the chapter's subject proper, the derivative.
+
+What is the derivative? The \emph{derivative} is the instantaneous rate
+or slope of a function. In mathematical symbols and for the moment
+using real numbers,\footnote{%
+ Professional mathematicians tend to prefer another, more
+ selfcontained definition. Section~\ref{drvtv:240.38} will briefly
+ present it. See too
+ % The word "eqns." should normally be omitted, but in this instance it
+ % disambiguates compared to the last sentence.
+ eqns.~(\ref{drvtv:defz}) and~(\ref{drvtv:defzunbal}).%
+}
\bq{drvtv:def}
f'(t) \equiv \lim_{\ep\rightarrow 0^+} \frac{f(t+\ep/2)f(t\ep/2)}{\ep}.
\eq
Alternately, one can define the same derivative in the unbalanced form
\[
 f'(t) = \lim_{\ep\rightarrow 0^+} \frac{f(t+\ep)f(t)}{\ep},
\]
but this book generally prefers the more elegant balanced
form~(\ref{drvtv:def}), which we will now use in developing the
derivative's several properties through the rest of the chapter.%
\footnote{
 From this section through \S~\ref{drvtv:260}, the mathematical
 notation grows a little thick. There is no helping this. The reader
 is advised to tread through these sections line by stubborn line, in
 the good trust that the math thus gained will prove both interesting
 and useful.
}
+Alternately,
+\bq{drvtv:defunbal}
+ f'(t) \equiv \lim_{\ep\rightarrow 0^+} \frac{f(t+\ep)f(t)}{\ep}.
+\eq
+Because~$\ep$ is infinitesimal, either the balanced
+definition~(\ref{drvtv:def}) or the unbalanced
+definition~(\ref{drvtv:defunbal}) should in theory yield the same result
+(where it does not, you have a problem: the derivative does not exist at
+that value of~$t$; for example, given $f[t]=1/t$, $f'[t]_{t=0}$ does not
+exist despite that it exists at other values of~$t$). Both definitions
+have their uses but applied mathematicians tend to prefer the
+balanced~(\ref{drvtv:def}) because it yields comparatively accurate
+results in practical approximations in which~$\ep$, though small, is not
+actually infinitesimal.\footnote{\cite[\S\S~I:9.6
+and~I:9.7]{Feynman}\cite[\S~4.3.4]{Cunningham}}
+Except where otherwise
+stated, this book will prefer the balanced~(\ref{drvtv:def})or
+rather, as we shall eventually see, will prefer its generalized form,
+the balanced~(\ref{drvtv:defz}).
+
+(Note: from this section through \S~\ref{drvtv:260}, the mathematical
+notation necessarily grows a little thick. This cannot be helped, so if
+you are reading straight through, be prepared for a bit of a hard slog.)
\subsection{The derivative of the power series}
\label{drvtv:240.20}
@@ 512,7 +685,7 @@ says that
(1+\ep/2t)^k  (1\ep/2t)^k
}{\ep}.
\eqb
Applying~(\ref{drvtv:230:apxe}), this is
+Applying~(\ref{drvtv:230:apxe}),
\[
f'(t)
= \sum_{k=\infty}^{\infty} \lim_{\ep\rightarrow 0^+}
@@ 523,12 +696,22 @@ Applying~(\ref{drvtv:230:apxe}), this is
\]
which simplifies to
\bq{drvtv:240:polyderiv}
 f'(t) = \sum_{k=\infty}^{\infty} c_kkt^{k1}.
+ f'(t) = \sum_{k=\infty}^{\infty} c_kkt^{k1},
\eq
+assuming of course that the sum converges.%
+\footnote{%
+ The book will seldom again draw attention to such caveats of abstract
+ rigor, even in passing. For most \emph{concrete} series to which one
+ is likely to apply~(\ref{drvtv:240:polyderiv}) in practice, the
+ series' convergence or nonconvergence will be plain enough on its
+ face, as abstract considerations of theoretical sumworthiness fade
+ into an expedient irrelevance. (For a closer applied consideration of
+ sumworthiness nevertheless, see~\cite{Andrews}.)
+}
Equation~(\ref{drvtv:240:polyderiv}) gives the general derivative of the
power series.%
\footnote{
 Equation~(\ref{drvtv:240:polyderiv}) admittedly has not explicitly
+\footnote{%
+ Equation~(\ref{drvtv:240:polyderiv}) has not admittedly, explicitly
considered what happens when the real~$t$ becomes the complex~$z$, but
\S~\ref{drvtv:240.50} will remedy the oversight.
}
@@ 546,20 +729,7 @@ power series.%
The $f'(t)$ notation used above for the derivative is due to Sir Isaac
Newton, and is easier to start with. Usually better on the whole,
however, is G.W.~Leibnitz's notation%
\footnote{
 This subsection is likely to confuse many readers the first time
 they read it. The reason is that Leibnitz elements like~$dt$ and~$\partial
 f$ usually tend to appear in practice in certain specific
 relations to one another, like $\partial f/\partial z$. As a
 result, many users of applied mathematics have never developed a
 clear understanding as to precisely what the individual symbols
 mean. Often they have developed positive misunderstandings.
 Because there is significant practical benefit in learning how to
 handle the Leibnitz notation correctlyparticularly in applied
 complex variable theorythis subsection seeks to present each
 Leibnitz element in its correct light.
}
+however (but see appendix~\ref{purec}), is G.~W.\ Leibnitz's notation,
\bqb
dt &=& \ep,\\
df &=& f(t+dt/2)f(tdt/2),
@@ 568,133 +738,566 @@ such that per~(\ref{drvtv:def}),
\bq{drvtv:240:50}
f'(t) = \frac{df}{dt}.
\eq
Here~$dt$ is the infinitesimal, and~$df$ is a dependent infinitesimal
whose size \emph{relative to~$dt$} depends on the independent variable~$t$.
For the independent infinitesimal~$dt$, conceptually, one can
choose any infinitesimal size~$\ep$. Usually the exact choice of size
does not matter, but occasionally when there are two independent
variables it helps the analysis to adjust the size of one of the
independent infinitesimals with respect to the other.

The meaning of the symbol~$d$ unfortunately depends on the context.
In~(\ref{drvtv:240:50}), the meaning is clear enough: $d(\cdot)$
signifies how much $(\cdot)$ changes when the independent variable~$t$
increments by~$dt$.%
\footnote{
 If you do not fully understand this sentence, reread it carefully with
 reference to~(\ref{drvtv:def}) and~(\ref{drvtv:240:50}) until you
 do; it's important.
}
Notice, however, that the notation~$dt$ itself has two distinct meanings:%
\footnote{
 This is difficult, yet the author can think of no clearer, more
 concise way to state it. The quantities~$dt$ and~$df$ represent
 coordinated infinitesimal changes in~$t$ and~$f$ respectively, so
 there is usually no trouble with treating~$dt$ and~$df$ as though they
 were the same kind of thing. However, at the fundamental level they
 really aren't.

 If~$t$ is an independent variable, then~$dt$ is just an infinitesimal
 of some kind, whose specific size could be a function of~$t$ but more
 likely is just a constant. If a constant, then~$dt$ does not
 fundamentally have anything to do with~$t$ as such. In fact, if~$s$
 and~$t$ are both independent variables, then we can (and in complex
 analysis sometimes do) say that $ds=dt=\ep$, after which nothing
 prevents us from using the symbols~$ds$ and~$dt$ interchangeably.
 Maybe it would be clearer in some cases to write~$\ep$
 instead of~$dt$, but the latter is how it is conventionally written.

 By contrast, if~$f$ is a dependent variable, then~$df$ or $d(f)$ is
 the amount by which~$f$ changes as~$t$ changes by~$dt$. The~$df$ is
 infinitesimal but not constant; it is a function of~$t$. Maybe it
 would be clearer in some cases to write~$d_tf$ instead of~$df$, but
 for most cases the former notation is unnecessarily cluttered; the
 latter is how it is conventionally written.

 Now, most of the time, what we are interested in is not~$dt$ or~$df$
 as such, but rather the ratio $df/dt$ or the sum $\sum_k f(k\,dt) \,dt
 = \int f(t) \,dt$. For this reason, we do not usually worry about
 which of~$df$ and~$dt$ is the independent infinitesimal, nor do we
 usually worry about the precise value of~$dt$. This leads one to
 forget that~$dt$ does indeed have a precise value. What confuses is
 when one changes perspective in midanalysis, now regarding~$f$ as the
 independent variable. Changing perspective is allowed and perfectly
 proper, but one must take care: the~$dt$ and~$df$ after the change are
 not the same as the~$dt$ and~$df$ before the change. However, the
 ratio $df/dt$ remains the same in any case.

 Sometimes when writing a differential equation like the
 potentialkinetic energy equation $ma\,dx=mv\,dv$, we do not necessarily
 have either~$v$ or~$x$ in mind as the independent variable. This is
 fine. The important point is that~$dv$ and~$dx$ be coordinated so
 that the ratio $dv/dx$ has a definite value no matter which of
 the two be regarded as independent, or whether the independent be some
 third variable (like~$t$) not in the equation.

 One can avoid the confusion simply by keeping the $dv/dx$ or $df/dt$
 always in ratio, never treating the infinitesimals individually. Many
 applied mathematicians do precisely that. That is okay as far as it
 goes, but it really denies the entire point of the Leibnitz notation.
 One might as well just stay with the Newton notation in that case.
 Instead, this writer recommends that you learn the Leibnitz notation
 properly, developing the ability to treat the infinitesimals
 individually.

 Because the book is a book of applied mathematics, this footnote does
 not attempt to say everything there is to say about infinitesimals.
 For instance, it has not yet pointed out (but does so now) that even
 if~$s$ and~$t$ are equally independent variables, one can have $dt =
 \ep(t)$, $ds = \delta(s,t)$, such that~$dt$ has prior independence to~$ds$.
 The point is not to fathom all the
 possible implications from the start; you can do that as the need
 arises. The point is to develop a clear picture in your mind of what
 a Leibnitz infinitesimal really is. Once you have the picture, you
 can go from there.
}
\bi
 \item the independent infinitesimal $dt=\ep$; and
 \item $d(t)$, which is how much $(t)$ changes as~$t$ increments by~$dt$.
\ei
At first glance, the distinction between~$dt$ and $d(t)$ seems a
distinction without a difference; and for most practical cases of
interest, so indeed it is. However, when switching perspective in
midanalysis as to which variables are dependent and which are
independent, or when changing multiple independent complex variables
simultaneously, the math can get a little tricky. In such cases, it may
be wise to use the symbol~$dt$ to mean $d(t)$ only, introducing some
unambiguous symbol like~$\ep$ to represent the independent
infinitesimal. In any case you should appreciate the conceptual
difference between $dt=\ep$ and $d(t)$, both of which nonetheless
normally are written~$dt$.
+Here,~$dt$ is the infinitesimal, and~$df$ is a dependent infinitesimal
+whose size \emph{relative to~$dt$} depends on the independent
+variable~$t$.
+
+\index{mental image of the infinitesimal}
+\index{infinitesimal!mental image of}
+Conceptually, one can choose any sufficiently small size~$\ep$ for the
+independent infinitesimal~$dt$; and, actually, though we have
+called~$dt$ ``independent,'' what we really mean is that the
+variable~$t$ with which~$dt$ is associated is independent. The size
+of~$dt$ may be constant (this is typically easiest) but may instead
+depend on~$t$ as $dt=\ep(t)$. Fortunately, one seldom needs to say, or
+care, what the size of an independent infinitesimal like~$dt$ is. All
+one normally needs to worry about are the sizes of other infinitesimals
+in proportion to~$dt$.
+
+As an example of the infinitesimal's use,
+if $f(t) \equiv 3t^3 - 5$, then $f(t \pm dt/2) = 3(t \pm
+dt/2)^3 - 5 = 3t^3 \pm (9/2)t^2\,dt + (9/4)t\,dt^2 \pm (3/8)dt^3 - 5$,
+whence $df = f(t+dt/2)-f(t-dt/2) = 9t^2\,dt + (3/4)dt^3$, and thus
+$df/dt = 9t^2 + (3/4)dt^2$---which has that $df/dt = 9t^2$ in the limit
+as~$dt$ tends to vanish. The example is easier
+if~(\ref{drvtv:230:apxe}) is used to approximate that $f[(t)(1 \pm
+dt/2t)] \approx 3t^3 \pm (9/2)t^2\,dt - 5$, the details of which are
+left as an exercise.
+
+Where two or more independent variables are simultaneously in play,
+say~$s$ and~$t$, the mathematician can have two, distinct independent
+infinitesimals~$ds$ and~$dt$---or, as one often styles them in such
+cases,~$\partial s$ and~$\partial t$. The size of~$\partial s$ may be
+constant but may depend on~$s$, $t$, or both, as $\partial
+s=\delta(s,t)$ where the~$\delta$ is like~$\ep$ an infinitesimal;
+and, likewise, the size of~$\partial t$ may be constant but may depend
+on~$s$, $t$, or both, as $\partial t=\ep(s,t)$. Fortunately, as before,
+one seldom needs to say or care what the sizes are.
+
+An applied mathematician ought to acquire, develop and retain a clear,
+lively, flexible mental image of Leibnitz's infinitesimal.
+
+\subsection{Considerations of the Leibnitz notation}
+\label{drvtv:240.26}
+\index{Leibnitz, Gottfried Wilhelm (1646--1716)}
+\index{Leibnitz notation}
+\index{$d$ and~$\partial$}
+\index{$\partial$ and~$d$}
+
+The precise meaning of Leibnitz's letter~$d$ subtly depends on its
+context. In~(\ref{drvtv:240:50}), the meaning is clear enough:
+$d(\cdot)$ signifies the amount by which~$(\cdot)$ changes while the
+independent variable~$t$ is increasing by~$dt$. Indeed, so
+essential is this point to the calculus concept that it bears repeating
+for emphasis!
+\begin{quote}
+ {\scshape Insofar as~$(\cdot)$ depends on~$t$, the notation
+ $d(\cdot)$ signifies the amount by which~$(\cdot)$ changes while~$t$
+ is increasing by~$dt$.}
+\end{quote}
+
+\noindent%
+\index{independent variable!multiple}%
+\index{variable!independent, multiple}%
+\index{Jacobi, Carl Gustav Jacob (1804--1851)}%
+The following notational anomaly intrudes to complicate the matter.
+Where two or more independent variables are at work in the same
+equation or model, for instance~$s$ and~$t$, convention warps Leibnitz's
+letter~$d$ into the shape of Carl Jacobi's letter~$\partial$ (already
+seen in \S~\ref{drvtv:240.25}). Convention warps the letter, not for
+any especially logical reason, but as a visual reminder that multiple
+independents are in play. For example, if $f(s,t) \equiv s^2 + 3st^2 +
+t^4$, then $\partial_s f = (2s+3t^2)\partial s$ [which represents the
+change~$f$ undergoes while~$s$ is increasing by an increment~$\partial
+s$ and~$t$ is held constant] but $\partial_t f = (6st+4t^3)\partial t$
+[which represents the change~$f$ undergoes while~$t$ is increasing by an
+increment~$\partial t$ and~$s$ is held constant].
+
+In practice, the style of~$\partial_s f$ and~$\partial_t f$ is usually
+overelaborate. Usually, one abbreviates each as~$\partial f$. Context
+normally clarifies.
+%\footnote{\index{Cantor, Georg (18451918)}\index{Weierstrass, Karl Wilhelm Theodor (18151897)}%
+% % This footnote is a bit sharp, even impudent, maybe (though I still
+% % kinda like it). THB
+% So, did Cantor not make a good point after all when he
+% execrated~\cite{Bell:Cantor} the very concept of the infinitesimal?
+% Answer: no, in your author's opinion, he did not.
+%
+% If Cantor execrates the infinitesimal because he feels that the
+% institution of the infinitesimal frustrates his program, yet the
+% program nonetheless fails after the execrable device has been excluded
+% from it, then Cantor's distaste can hardly oblige you and me to
+% abandon the device.%
+%}
\index{partial derivative}
\index{derivative!partial}
Where two or more independent variables are at work in the same
equation, it is conventional to use the symbol~$\partial$ instead of~$d$,
as a reminder that the reader needs to pay attention to which~$\partial$
tracks which independent variable.%
\footnote{
 The writer confesses that he remains unsure why this minor distinction
 merits the separate symbol~$\partial$, but he accepts the notation as
 conventional nevertheless.
}
A derivative $\partial f/\partial t$ or $\partial f/\partial s$ in this
case is sometimes called by the slightly misleading name of
\emph{partial derivative}.
(If needed or desired, one can write $\partial_t f$ when tracking~$t$,
$\partial_s f$ when tracking~$s$, etc. Use discretion, though.
Such notation appears only rarely in the literature,
so your audience might not understand it when you write it.)
Conventional shorthand for $d(df)$ is~$d^2f$; for $(dt)^2$, $dt^2$; so
+\index{differential equation}
+\index{differential equation!ordinary}
+\index{differential equation!partial}
+\index{ordinary differential equation}
+\index{partial differential equation}
+A derivative like $\partial f/\partial s$ or $\partial f/\partial t$
+(that is, like $\partial_s f/\partial s$ or $\partial_t f/\partial t$) is
+called a \emph{partial derivative} because in it, only one of two or
+more independent variables is varying. An equation containing
+derivatives (whether partial or otherwise), or containing infinitesimals
+like~$df$ or~$\partial f$ that represent the change a dependent variable
+like~$f$ undergoes, is called a \emph{differential equation.} A
+differential equation whose derivatives or infinitesimals track more
+than one independent variable is called a \emph{partial differential
+equation.}\footnote{%
+ Chapter~\ref{vcalc} gives many examples of partial differential
+ equations, for instance~(\ref{vcalc:440.cyl:grad}).%
+}
+A differential equation whose derivatives or
+infinitesimals track only one independent variable is called an
+\emph{ordinary differential equation.}
+
+\index{nonstandard notation}
+\index{notation!nonstandard}
+Observe incidentally that the notation~$\partial_s f$ is nonstandard.
+For obscure reasons (\S\S~\ref{drvtv:240.405} and~\ref{drvtv:240.407}),
+the style usually instead seen in print is that of~$(\partial f/\partial
+s)ds$, rather.
+
+\index{partial~$d$}
+\index{warped~$d$}
+\index{$d,$ warped or partial}
+The symbol~$\partial$ is merely a warped letter~$d$.
+Chapter~\ref{integ} will use the warped letter a little, as will \S\S~\ref{taylor:370}
+and~\ref{mtxinv:420}. Chapter~\ref{vcalc} will use the warped letter a lot.
+% diagn: Should a reference be given here to the cylindrical solution
+% of the wave eqn., maybe among others, once these have been added to
+% the book?
+
+\index{formal parameter}
+\index{parameter!formal}
+\index{energy!potential}
+\index{energy!kinetic}
+\index{potential energy}
+\index{kinetic energy}
+\index{independent variable!lack of}
+\index{variable!independent, lack of}
+We have mentioned equations with two or more independent variables.
+However, some equations with infinitesimals, such as the
+potential-kinetic energy equation that $ma\,dx=mv\,dv$, do not
+explicitly include or refer to any independent variable at all.%
+\footnote{%
+ The~$m$ stands for mass, the~$x$ for position,
+ the~$v$ for speed, and the~$a$ for acceleration. The model's
+ independent variable would probably be~$t$ for time but that variable
+ does not happen to appear in this equation.%
+}
+Context can sometimes supply an independent the equation does not
+mention, like~$t$, upon which~$x$ and~$v$ both depend; but it may be
+that the equation speaks only to how~$x$ and~$v$ change conjointly,
+without suggesting that either change caused the other and without
+explicit reference to an independent of any kind. Another example of
+the sort would be the economist's demandelasticity equation,
+$e\,dP/P=dQ/Q$, which speaks to how~$P$ and~$Q$ change conjointly.%
+\footnote{%
+ The~$e$ (stated as a unitless negative number) stands for demand
+ elasticity, the~$P$ for price, and the~$Q$ for quantity demanded.
+ Refer to~\cite[chapter~4]{Jothi/Srin}.%
+}
+This is all right. Moreover, even in the rare application in which the
+lack of an independent does pose some trouble, one can often remedy
+the trouble by introducing a purely formal parameter to serve as it were
+an independent.
+
+\index{factor of integration}
+\index{infinitesimal factor of integration}
+\index{integration!infinitesimal factor of}
+Convention sports at least one other notational wrinkle we should
+mention, a wrinkle that
+comes into view from chapter~\ref{integ}. Convention writes $\int_0^t
+f(\tau)\,d\tau$ rather than $\int_0^t f(\tau)\,\partial\tau$, which is
+to say that it eschews the
+\linebreak % bad break
+warped~$\partial$ when writing chapter~\ref{integ}'s
+infinitesimal factor of integration. One could explain this by
+observing that the true independent~$t$ acts as a constant within the
+dummy's scope, and thus that the dummy sees itself as a lone
+independent within that scope; but whatever the explanation, that is how
+mathematicians will write the thing.
+% The notational distinction between~$\partial$ and~$d$ is ubiquitous,
+% entrenched and harmless; and, besides, the distinction lends a
+% certain, classic look to equations in which the symbol~$\partial$
+% appears, the rounded~$\partial$ visually balancing the straight
+% division bar over and under which it typically appears; whereas the
+% straightbacked~$d$ puts a firm stop to the sinuous~$\int$. Handsome
+% equations, after all, are for that not all to the bad.
+
+\subsection{Remarks on the Leibnitz notation}
+\label{drvtv:240.405}
+
+A deep mystery is implicated. None has wholly plumbed it. Perhaps none
+ever will.
+
+\index{Thales (fl.~585~B.C.)}
+\index{Anaximander (fl.~570~B.C.)}
+\index{Anaximenes (fl.~550~B.C.)}
+\index{Heraclitus (fl.~500~B.C.)}
+\index{Parmenides (b.~c.~515~B.C.)}
+\index{Zeno of Elea (fl.~460~B.C.)}
+\index{Melissus (fl.~440~B.C.)}
+\index{Anaxagoras (500--428~B.C.)}
+\index{Leucippus (fl.~440~B.C.)}
+\index{Democritus (b.~c.~458 B.C.)}
+\index{Eudoxus (408--355~B.C.)}
+\index{Euclid (325--265~B.C.)}
+\index{Archimedes (287--212~B.C.)}
+\index{Plato (428--348~B.C.)}
+\index{Aristotle (384--322~B.C.)}
+\index{Epicurus (341--271~B.C.)}
+\index{Zeno of Cition (fl.~250~B.C.)}
+\index{Chrysippus (280--206~B.C.)}
+\index{antiquity}
+\index{arithmetic}
+\index{geometry}
+\index{continuity}
+\index{discreteness}
+During antiquity,
+Thales,
+Anaximander,
+Anaximenes,
+Heraclitus,
+Parmenides,
+Zeno of Elea,
+Melissus,
+Anaxagoras,
+Leucippus,
+Democritus,
+Eudoxus,
+Euclid,
+Archimedes,
+Epicurus,
+Zeno of Cition,
+Chrysippus,
+Plato and
+Aristotle\footnote{These names and others are marshaled and accounted by~\cite{Bell:2006}.}
+long debated---under various forms---whether material reality and, more
+pertinently, immaterial reality\footnote{%
+ An influential school of thought asserts that immaterial reality does
+ not exist, or that it might as well not exist. The school is
+ acknowledged but the writer makes no further comment except that that
+ is not what this paragraph is about.
+
+ Meanwhile, mathematical ontology is something we can discuss but
+ general ontology lies beyond the writer's expertise.%
+}
+are essentially \emph{continuous,} as geometry; or essentially
+\emph{discrete,} as arithmetic. (Here we use ``arithmetic'' in the
+ancient sense of the word\@.) We still do not know. Indeed, we do not
+even know whether this is the right question to ask.
+
+\index{profundity}
+\index{question!tailoring}
+\index{tailoring the question}
+One feels obliged to salute the erudition of the
+professional mathematician's ongoing effort to find the question and
+give the answer; and yet, after twenty-five centuries, when
+the best efforts to give the answer seem to have succeeded chiefly to
+the extent to which they have \emph{tailored the question}\footnote{%
+ \index{Hercules}%
+ Is this adverse criticism? No. Indeed, one can hardly see what else
+ the best efforts might have done, given the Herculean task those
+ efforts had set for themselves. A task may, however, be too large, or
+ be inherently impossible, even for Hercules.%
+}
+to suit whatever answer is currently known and deemed best, why, the
+applicationist's interest in the matter may waver.
+
+What the applicationist knows or believes is this: that the continuous
+and the discrete---whether each separately or both together---appeal
+directly to the mathematical intuition. If the mind's eye already sees
+both, and indeed sees them together in the same mental scene, then one
+may feel little need further to deconstruct the two.\footnote{%
+ \index{mind's eye}%
+ \index{eye of the mind}%
+ The mind's eye may be deceived where pure analysis does not err, of
+ course, insofar as pure analysis relies upon disciplined formalisms.
+ This is not denied. What the mind's eye \emph{is,} and how it is able
+ to perceive the abstract, are great questions of epistemology
+ beyond the book's scope.%
+}
+
+One might, of course, inadvertently obscure the mental scene by
+elaborating certain definitions, innocently having meant to place the
+whole scene upon an indisputably unified basis;
+but, if one does elaborate certain definitions \emph{and this act indeed
+obscures the scene,} then one might ask:
+is it not the definitions which are suspect?
+Nowhere after all is it written that any indisputably unified basis
+shall, even in principle, be accessible to the mind of man.\footnote{%
+ ``But~ZFC is such a basis!'' comes the objection.
+
+ \index{Wittgenstein, Ludwig (1889--1951)}
+ However, whether~ZFC is truly a basis or is rather a clever
+ contraption recently bolted onto the side of pre\"existing, even
+ primordial mathematics is a question one can debate. Wittgenstein
+ debated it. See \S~\ref{intro:284.4}.%
+}
+Such a basis might be accessible, or it might not. The search is
+honorable and worthwhile, but it can utterly fail. We are not required
+to abandon mathematics if it does.
+
+To the applicationist meanwhile, during the modeling of a given
+physical system, the choice of whether to employ the continuous or the
+discrete will chiefly depend not on abstract considerations but rather
+on the idiosyncratic demands of the problem at hand.
+
+Such is the applicationist's creed.
+
+\subsection{The professional's creed; further remarks}
+\label{drvtv:240.407}
+
+Yet, what of the \emph{professional's} creed? May the professional not
+also be heard? And, anyway, what do any of these things have to do with
+the Leibnitz notation?
+
+\index{Brouwer, Luitzen Egbertus Jan (1881--1966)}
+\index{truths of mathematics, the}
+\index{stone}
+\index{mountain, barren}
+\index{disconsolate infinity}
+\index{infinity!disconsolate}
+\index{immovability}
+\index{toleration}
+\index{indulgence}
+The professional may indeed be heard, and more eloquently elsewhere than
+in this book; but, for this book's purpose, the answers are a matter
+of perspective.
+The professional mathematician L.~E.~J.\ Brouwer has memorably
+described the truths of mathematics as ``fascinating by their
+immovability, but horrifying by their lifelessness, like stones from
+barren mountains of disconsolate
+infinity.''\footnote{\label{drvtv:240.407fn1}Source:
+\cite{vanAtten}\@. Brouwer later changed his mind.}
+Brouwer's stones have not always appeared to the professional
+to countenance Leibnitz's infinitesimal.\footnote{\cite{Bell:Cantor}}
+%of \S\S~\ref{drvtv:240.25} and~\ref{drvtv:240.26}
+Though the professional may tolerate, though the professional may even
+indulge, the applicationist's use of Leibnitz's infinitesimal as an
+expedient, the professional may find himself unable to summon greater
+enthusiasm for the infinitesimal than this.
+
+\index{Russell, Bertrand (1872--1970)}
+Indeed, he may be even less enthusiastic. As the professional
+mathematician Bertrand Russell succinctly judges,
+``[I]{n}{f}{i}{n}{i}{t}{e}{s}{i}{m}{a}{l}{s} as explaining continuity
+must be regarded as unnecessary, erroneous, and
+self-contradictory.''\footnote{\cite{Bell:2006}}
+
+\index{Cantor, Georg (1845--1918)}
+\index{Aquinas, St.~Thomas (1225--1274)}
+The professional mathematician Georg Cantor recounts:
+\begin{quote}
+ [A] great quarrel arose among the philosophers, of whom some
+ followed Aristotle, others Epicurus; still others, in order to remain
+ aloof from this quarrel, declared with Thomas Aquinas that the
+ continuum consisted neither of infinitely many nor of a finite number
+ of parts, but of absolutely no parts. This last opinion seems to me to
+ contain less an explanation of the facts than a tacit confession that
+ one has not got to the bottom of the matter and prefers to get
+ genteelly out of its way.\footnote{\emph{Ibid.}}
+\end{quote}
+(The present writer has no opinion on St.~Thomas' declaration or on Cantor's
+interpretation thereof but, were it possible, would concur in Thomas'
+preference to get out of the way. Alas!)
+
+\index{Weyl, Hermann (1885--1955)}
+\index{Robinson, Abraham (1918--1974)}
+\index{bridge}
+\index{weather forecast}
+\index{integrated circuit}
+\index{space shot}
+On the opposite side, you have professional mathematicians like
+Hermann Weyl (as quoted in \S\S~\ref{intro:284.4} and~\ref{intro:284.45})
+and Abraham Robinson, along maybe with Brouwer himself,\footnote{See
+footnote~\ref{drvtv:240.407fn1}.} who have seemed to suggest that
+professional mathematics might rearrange---or at least search out a more
+congenial perspective upon---Brouwer's immovable stones to countenance
+the infinitesimal
+nevertheless.\footnote{\cite{Weyl:1918}\cite{Scholz}\cite{Robinson}\cite{Bell:2006}}
+All the while, scientists and engineers have cheerfully kept plying the
+infinitesimal. Scientists and engineers appear to have been obtaining
+good results, too: bridges; weather forecasts; integrated circuits;
+space shots; etc. It seems that whether one approves the infinitesimal
+depends chiefly on whether one's focus is in applications or in
+foundations.
+
+The present book's focus is of course in applications. Fortunately,
+if you are a Platonist as the author is, or even if you are an
+intuitionist as Brouwer was, then this particular collision of
+foundations against applications need not much disturb you. See
+\S~\ref{intro:284.4}.
+
+So, what of the infinitesimal, this concept which has exercised the
+great philosophical minds of the ages? After all this, how shall the
+student of applied mathematics now approach it?
+
+\index{consensus!vacillating}
+\index{vacillating consensus}
+Following twenty-five centuries of spirited debate and vacillating
+consensus, the prudent student will remain skeptical of \emph{whatever}
+philosophy the latest consensus (or this book) might press him to adopt.
+%
+Otherwise, at the undergraduate level at any rate, many students though
+having learned a little calculus have not quite learned how to approach
+the infinitesimal at all. That is, they have never developed a clear
+intuition as to what Leibnitz elements like~$df$, $dt$, $\partial g$
+and~$\partial x$ individually might mean---especially when these
+students have seen such elements heretofore only in certain, specific
+combinations like $df/dt$, $\partial g/\partial x$ and $\int f(t)\,dt$.
+Often, these students have developed positive misunderstandings
+regarding such elements. The vacillation of consensus may be blamed for
+this.
+
+% This paragraph was interesting in its initial conception but, as the
+% surrounding text has evolved, the paragraph has proved unable to
+% evolve with it.
+%\index{ZermeloFraenkel and Choice set theory~(ZFC)}%
+%\index{ZFC}%
+%Teachers must take some of the blame for that, cautiously hesitating
+%between the one school and the other, yet it is
+%hard to see what else the teacher of our present era might done. A
+%teachersimultaneously confronted by Leibnitz's infinitesimal and,
+%contrarily, by Cantor's (and Russell's) antiin\finites\i\mal
+%programwants to turn both ways at once. After all, Cantor's
+%program in the form of~ZFC techniques (\S~\ref{intro:284.45}) has
+%remarkably succeeded at representing or paralleling many kinds of
+%mathematical logic;\footnote{The writer takes the professional's word
+%for this.} whereas Leibnitz's infinitesimal has
+%midwifed the modern industrialized world. Still, to hesitate between
+%the one and the other when first introducing students to calculus has
+%arguably been a mistake.
+
+Therefore, in this book, having acknowledged that an opposing,
+meritorious school of thought exists, let us not hesitate between the
+two schools. Though the writer acknowledges the cleverness of certain
+of Cantor's results (as for example in~\cite[\S\S~1.8 and~2.4]{Shilov})
+and allows Cantor's ambition regarding the continuum its due, the author
+is an applicationist and thus his book (you are reading it) esteems
+Leibnitz more highly than it does Cantor. The charge could be leveled
+that the author does not grasp Cantor and there would be some truth in
+this charge, but the book's esteem for Leibnitz is more than a mere
+matter of preferred style. A mystery of mathematical philosophy is
+involved and deeply entwined, touched upon in \S~\ref{intro:284} and
+again in this section, as Cantor himself would undoubtedly have been
+the first to insist. Also, remember, Weyl disputed Cantor, too.
+
+%\index{Weierstrass, Karl Wilhelm Theodor (18151897)}%
+%\index{Dedekind, Richard (18311916)}%
+Even setting aside foundational mysteries and the formidable Cantor,
+there is at any rate significant practical benefit in learning how to
+handle the Leibnitz notation correctly.
+Physicists, chemists, engineers and
+economists have long been used to handling Leibnitz elements
+individually. For all these reasons among others, the present
+section seeks to present each Leibnitz element in its proper,
+individual light.
+
+\index{Robinson, Abraham (1918--1974)}
+The chief recent source to which professional mathematicians seem to
+turn to bridge the questions this section ponders is Robinson's~1966
+book \emph{Nonstandard Analysis}~\cite{Robinson}\@.
+%\footnote{The author of the book you are now
+%reading, an engineer, does not pretend to have digested Robinson's
+%book despite that Robinson is a good writer; but to quote from
+%Robinson's first two pages is nonetheless entertaining.}
+To conclude the subsection, therefore, we may hear Robinson's words:
+\begin{quotation}
+ \noindent
+ Suppose that we ask a welltrained mathematician for the meaning of
+ [the derivative]
+ \[
+ \lim_{x\rightarrow x_o} \frac{f(x)-f(x_o)}{x-x_o} = a.
+ \]
+ %Then we may rely on it that \ldots\ his explanation will be thus:
+ Then we may rely on it that [he will explain it as
+ \S~\ref{drvtv:240.38} below].
+
+ Let us now ask our mathematician whether he would not accept the
+ following more direct interpretation\mdots
+
+ For any~$x$ in the interval of definition of $f(x)$ such that
+ $dx=x-x_o$ is \emph{infinitely close} to~$0$ but not equal to~$0$,
+ the ratio $df/dx$, where
+ \[
+ df=f(x)-f(x_o),
+ \]
+ is \emph{infinitely close} to~$a$.
+
+ To this question we may expect the answer that our definition may be
+ simpler in appearance but unfortunately it is also meaningless. If
+ we then try to explain that two numbers are infinitely close to one
+ another if their distance \ldots\ is \emph{infinitely small},\,\ldots\
+ we shall probably be faced with the rejoinder that this is possible
+ only if the numbers co\"incide. And, so we may be told charitably,
+ this obviously is not what we meant since it would make our
+ explanation trivially wrong.
+
+ However, in spite of this shattering rebuttal, the idea of
+ infinitely small or \emph{infinitesimal} quantities seems to appeal
+ naturally to our intuition. At any rate, the use of infinitesimals
+ was widespread during the formative stages of the Differential and
+ Integral Calculus\mdots~\cite[\S~1.1]{Robinson}.
+\end{quotation}
+\index{Derbyshire, John (1945--)}%
+\index{rugby}%
+Your author is an engineer. John Derbyshire
+quips\footnote{Derbyshire is author of~\cite{Derbyshire} but this quip
+came, not written, but spoken by him in~2015.} that we
+``[e]{n}{g}{i}{n}{e}{e}{r}{s} were a semicivilized tribe on an adjacent
+island, beery oafs who played hard rugby and never listened to concert
+music.'' Thus it seems fitting---whether your author be an oaf or
+not!---that in the book you are reading the ``shattering rebuttal''
+Robinson's hypothetical interlocutor has delivered shall not unduly
+discomfit us.
+%\footnote{%
+% If the writer may write in the first person for a moment:
+%
+% I should have preferred not to speak of such philosophical matters at
+% all, did mathematical convention not leave one caught between the two
+% horns of a dilemma. On the one horn, modern styles of mathematical
+% rigor impede the communication of the main point of a mathematical
+% idea to a scientist or engineer. On the other horn, to avoid such
+% styles in formal writing seems to confess a lack of knowledge.
+% \emph{The style does not help but one must use it anyway,} especially
+% if one has not earned latitude by gaining an advanced degree in
+% mathematics!
+%
+% [The footnote is abandoned from here. I had thought that I was going
+% somewhere with it, but now am unsure to where.]
+%}
+
+The book will henceforth ply the Leibnitz notation and its
+infinitesimals vigorously, with little further hedge, cavil or
+equivocation.
+
+\subsection{Higherorder derivatives}
+\label{dvrtv:240.41}
+\index{derivative!higherorder}
+\index{higherorder derivative}
+
+\index{derivative!second}
+\index{second derivative}
+\index{third derivative}
+Conventional shorthand for $d(df)$ is~$d^2\!f$; for $(dt)^2$, $dt^2$; so
\[
 \frac{d(df/dt)}{dt} = \frac{d^2f}{dt^2}
+ \frac{d(df/dt)}{dt} = \frac{d^2\!f}{dt^2}
\]
is a derivative of a derivative, or \emph{second derivative.} By
extension, the notation
\[
 \frac{d^kf}{dt^k}
+ \frac{d^k\!f}{dt^k}
\]
represents the $k$th derivative.
+represents the $k$th derivative. For example, if $k=3$, then
+$d[d(df)]=d^3\!f$, by which one writes $d^3\!f/dt^3$ for the derivative of
+a derivative of a derivative, or third derivative. So, if $f(t)\equiv t^4/8$,
+then $df/dt = t^3/2$, $d^2\!f/dt^2 = 3t^2/2$ and $d^3\!f/dt^3 = 3t$.
\subsection{The derivative of a function of a complex variable}
\label{drvtv:240.50}
@@ 703,67 +1306,107 @@ represents the $k$th derivative.
\index{variable!complex}
\index{function!of a complex variable}
For~(\ref{drvtv:def}) to be robust, written here in the slightly more
general form
+\index{referential infinitesimal}
+\index{infinitesimal!referential}
+For~(\ref{drvtv:def}) to be robust, one should like its ratio to
+approach a single, common value for all sufficiently small~$\ep$, for
+only when~$\ep$ grows beyond infinitesimal size should the ratio
+of~(\ref{drvtv:def}) become inexact. However,~(\ref{drvtv:def})
+considers only real, positive~$\ep$. What if~$\ep$ were not positive?
+Indeed, what if~$\ep$ were not even real?
+
+\index{derivative!balanced form}
+\index{derivative!unbalanced form}
+This turns out to be an important question, so let us now
+revise~(\ref{drvtv:def}) to establish the slightly more general form
\bq{drvtv:defz}
 \frac{df}{dz} = \lim_{\ep\rightarrow 0} \frac{f(z+\ep/2)-f(z-\ep/2)}{\ep},
+ \frac{df}{dz} \equiv \lim_{\ep\rightarrow 0} \frac{f(z+\ep/2)-f(z-\ep/2)}{\ep}
\eq
one should like it to evaluate the same in the limit
regardless of the complex phase of~$\ep$. That is, if~$\delta$ is a
positive real infinitesimal, then it should be equally valid to let
+and let us incidentally revise~(\ref{drvtv:defunbal}), also, to
+establish the corresponding unbalanced form
+\bq{drvtv:defzunbal}
+ \frac{df}{dz} \equiv \lim_{\ep\rightarrow 0}
+ \frac{f(z+\ep)-f(z)}{\ep},
+\eq
+where as in the section's introduction so here too applications tend to
+prefer the balanced~(\ref{drvtv:defz}) over the
+unbalanced~(\ref{drvtv:defzunbal}).
+
+\index{derivative!nonexistent}
+As in~(\ref{drvtv:def}), so too in~(\ref{drvtv:defz}) one should like the
+ratio to approach a single, common value\footnote{%
+ One can construct apparent exceptions like $f(z) = \sin(1/z)$. If
+ feeling obstreperous, one can construct far more unreasonable
+ exceptions such as the one found toward the end of
+ \S~\ref{taylor:320}.
+ % diagn: check that the above reference continues to hold as the draft
+ % changes
+ The applied mathematician can hardly be asked to
+ expand his definitions to accommodate all such mischief! He hasn't
+ the time.
+
+ \index{Hadamard, Jacques (1865--1953)}
+ When an apparent exception of the less unreasonable kinds arises in
+ the context of a particular physical model, rather than attempting to
+ accommodate the exception under the roof of an abstruse, universal
+ rule, the applicationist is more likely to cook up a way to work
+ around the exception in the specific context of the physical model at
+ hand (as for example in the socalled Hadamard finite part
+ of~\cite{WTAng}).%
+ %
+ %To the applicationist, the overgeneralization of methods is a vice,
+ %for the most general possible method may also be the most useless
+ %possible method. A prudent degree of specialization (even if the
+ %specialization is not explicitly delineated) can be preferable to an
+ %attempt to accommodate all abstractly conceivable cases under the roof
+ %of a single rule.
+ %
+ %You could write a whole book on how to expand definitions to
+ %accommodate abstractly conceivable cases. Some have done. By the
+ %time you had finished writing the book, though, the rest of us may
+ %have forgotten the question! No, the applicationist cannot afford the
+ %time, but must be a bit more practical than that. The applicationist
+ %wants to keep his focus from straying too far from the collection of
+ %physical models with which he might actually have to work.%
+}
+for all sufficiently small~$\ep$. However, in~(\ref{drvtv:defz}) one
+must consider not only the magnitude~$\ep$ of the referential
+infinitesimal but also its phase $\arg \ep$ (\S~\ref{alggeo:225}).
+%\footnote{
+% This book however prefers the balanced~(\ref{drvtv:defz}), among other
+% reasons because~(\ref{drvtv:defz}) more reliably approximates the
+% derivative of a function for which only discrete samples are
+% available~\cite[\S\S~I:9.6
+% and~I:9.7]{Feynman}\cite[\S~4.3.4]{Cunningham}\@. Moreover, for this
+% writer at least the balanced definition just better captures the
+% subjective sense of the thing.%
+%}
+For example, supposing that the symbol~$\delta$ represented some
+positive, real infinitesimal, it should be equally valid to let
$\ep=\delta$, $\ep=-\delta$, $\ep=i\delta$, $\ep=-i\delta$,
$\ep=(4i-3)\delta$ or any other infinitesimal value, so long as
$0<\left|\ep\right|\ll 1$. One should like the derivative~(\ref{drvtv:defz}) to
come out the same regardless of the Argand direction from which~$\ep$
approaches 0 (see Fig.~\ref{alggeo:225:fig}). In fact for the sake of
robustness, one normally demands that derivatives do come out the same
regardless of the Argand direction; and~(\ref{drvtv:defz}) rather
+$\ep=(4i-3)\delta$, or any other infinitesimal value.
+The ratio $df/dt$ of~(\ref{drvtv:defz}) ought to come out the same
+for all these.
+In fact, for the sake of robustness, one normally demands that the ratio
+does come out the same; and~(\ref{drvtv:defz}) rather
than~(\ref{drvtv:def}) is the definition we normally use for the
derivative for this reason. Where the limit~(\ref{drvtv:defz}) is
sensitive to the Argand direction or complex phase of~$\ep$, there we
normally say that the derivative does not exist.
+derivative for this reason. Specifically, where the
+limit~(\ref{drvtv:defz}) or even~(\ref{drvtv:defzunbal}) is sensitive to
+$\arg\ep$, there we normally say that the derivative does not exist.
\index{differentiability}
Where the derivative~(\ref{drvtv:defz}) does exist---where the
derivative is finite and insensitive to Argand direction---there we say
that the function $f(z)$ is \emph{differentiable.}%
\footnote{
 % diagn: this footnote still wants review.
 The unbalanced definition of the derivative from
 \S~\ref{drvtv:240}, whose complex form is
 \[
 \frac{df}{dz} = \lim_{\ep\rightarrow 0} \frac{f(z+\ep)-f(z)}{\ep},
 \]
 does not always serve applications as well as does the balanced
 definition~(\ref{drvtv:defz}) this book prefers. Professional
 mathematicians have different needs, though. They seem to prefer the
 unbalanced nonetheless.

 In the professionals'
 favor, one acknowledges that the balanced definition strictly
 misjudges the modulus function $f(z) = \left|z\right|$ to be
 differentiable solely at the point $z=0$, whereas that the unbalanced
 definition, probably more sensibly, judges the modulus to be
 differentiable nowherethough the writer is familiar with no
 significant appliedmathematical implication of the distinction.
 (Would it co\"ordinate the two definitions to insist that a derivative
 exist not only at a point but everywhere in the point's immediate,
 complex neighborhood? The writer does not know. It is a question for
 the professionals.) Scientists and engineers tend to
 prefer the balanced definition among other reasons because it more
 reliably approximates the derivative of a function for which only
 discrete samples are available~\cite[\S\S~I:9.6 and~I:9.7]{Feynman}\@.
 Moreover, for this writer at least the balanced definition just better
 captures the subjective sense of the thing.
}
+Where the derivative~(\ref{drvtv:defz}) does exist---that is, where the
+derivative is finite and is insensitive to our choice of a complex,
+infinitesimal value for~$\ep$---there we say that the function $f(z)$ is
+\emph{differentiable.}
Excepting the nonanalytic parts of complex numbers ($\left|\cdot\right|$,
$\arg[\cdot]$, $[\cdot]^{*}$, $\Re[\cdot]$ and $\Im[\cdot]$; see
\S~\ref{alggeo:225.3}), plus the Heaviside unit step $u(t)$ and the
Dirac delta $\delta(t)$ (\S~\ref{integ:670}), most functions encountered
in applications do meet the criterion~(\ref{drvtv:defz}) except at
isolated nonanalytic points (like $z=0$ in $h[z]=1/z$ or
$g[z]=\sqrt z$). Meeting the criterion, such functions are fully
+isolated nonanalytic points (like $z=0$ in $h[z]\equiv 1/z$ or
+$g[z]\equiv\sqrt z$). Meeting the criterion, such functions are fully
differentiable except at their poles (where the derivative goes infinite
in any case) and other nonanalytic points. Particularly, the key
formula~(\ref{drvtv:230:apxe}), written here as
@@ 776,16 +1419,17 @@ derivative~(\ref{drvtv:240:polyderiv}) o
 \frac{d}{dz} \sum_{k=-\infty}^{\infty} c_kz^k
 = \sum_{k=-\infty}^{\infty} c_kkz^{k-1}
\eq
holds equally well for complex~$z$ as for real.
+holds equally well for complex~$z$ as for real (but see also the next
+subsection).
\subsection{The derivative of~$z^a$}
\label{drvtv:240.30}
\index{derivative!of~$z^a$}
Inspection of \S~\ref{drvtv:240.20}'s logic in light
+Inspection of the logic of \S~\ref{drvtv:240.20} in light
of~(\ref{drvtv:230:apxe}) reveals that nothing prevents us from
replacing the real~$t$, real~$\ep$ and integral~$k$ of that section with
arbitrary complex~$z$, $\ep$ and~$a$. That is,
+arbitrary, complex~$z$, $\ep$ and~$a$. That is,
\bqb
\frac{d(z^a)}{dz}
&=& \lim_{\ep\rightarrow 0} \frac{
@@ 811,34 +1455,76 @@ is another matter, treated in \S~\ref{ce
its~(\ref{cexp:230:33}); but in any case you can
use~(\ref{drvtv:240.30:10}) for real~$a$ right now.
+\subsection{An alternate definition of the derivative}
+\label{drvtv:240.38}
+
+Professional mathematicians tend to prefer a less
+picturesque, alternate definition of the derivative~(\ref{drvtv:defz})
+or~(\ref{drvtv:defzunbal}):
+``For any positive number~$\ep$ there exists a positive
+number~$\delta$ such that
+\bq{drvtv:defp}
+ \left|\frac{f(z)-f(z_o)}{z-z_o} - a\right| < \ep
+\eq
+for all~$z$ \ldots\ for which
+\bq{drvtv:defpd}
+ 0<\left|z-z_o\right|<\delta,
+\eq
+[the quantity~$a$ being the \emph{derivative} $df/dz$].''\footnote{%
+ The quoted original is~\cite[\S~1.1]{Robinson}, from which the
+ notation has been adapted to this book's usage.%
+}%
+
+Equations~(\ref{drvtv:defp}) and~(\ref{drvtv:defpd}) bring few practical
+advantages to applications but are at least more selfcontained
+than~(\ref{drvtv:defz}) or~(\ref{drvtv:defzunbal}) is. However that may
+be, the derivative is such a pillar of mathematics that it behooves the
+applied mathematician to learn at least to recognize the professional's
+preferred definition of it. See also \S\S~\ref{drvtv:240.405}
+and~\ref{drvtv:240.407}.
+% I believe that the following is wrong.
+%Amusingly,
+%unlike~(\ref{drvtv:defz}), (\ref{drvtv:defp}) automatically notices
+%that, if $f(x) = 1/x$ (for example), then the derivative
+%$df/dx_{x=0}=a_{x=0}$ does not exist. How practically useful this
+%kind of automation is in applications, and whether it pays for the extra
+%esotericity~(\ref{drvtv:defp}) and~(\ref{drvtv:defpd}) bring, can be
+%debated.
+
\subsection{The logarithmic derivative}
\label{drvtv:240.40}
\index{logarithmic derivative}
\index{derivative!logarithmic}
\index{rate!relative}
\index{relative rate}
+\index{rate!proportional}
+\index{proportional rate}
\index{interest}
\index{bond}
Sometimes one is more interested in knowing the rate of $f(t)$
\emph{relative to the value of $f(t)$} than in knowing the absolute rate
+\index{interest}
+\index{rate!absolute}
+\index{rate!of interest}
+\index{absolute rate}
+Sometimes one is more interested to know the rate of $f(t)$
+\emph{in proportion to the value of $f(t)$} than to know the absolute rate
itself. For example, if you inform me that you earn $\$\:1000$ a year
on a bond you hold, then I may commend you vaguely for your thrift but
otherwise the information does not tell me much. However, if you inform
me instead that you earn~10 percent a year on the same bond, then I
might want to invest. The latter figure is a relative rate, or
\emph{logarithmic derivative,}
+might want to invest. The latter figure is a \emph{proportional rate}
+or \emph{logarithmic derivative,}
\bq{drvtv:240.40:10}
\frac{df/dt}{f(t)} = \frac{d}{dt}\ln f(t).
\eq
The investment principal grows at the absolute rate $df/dt$, but the
bond's interest rate is $(df/dt)/f(t)$.
+The investment principal grows at the absolute rate $df/dt$ but the
+bond's proportional rate, also called (in the case of a bond) its
+\emph{interest rate,} is $(df/dt)/f(t)$.
The natural logarithmic notation $\ln f(t)$ may not mean much to you yet,
as we'll not introduce it formally until \S~\ref{cexp:225}, so you can
+for we'll not introduce it formally until \S~\ref{cexp:225}, so you can
ignore the right side of~(\ref{drvtv:240.40:10}) for the moment; but the
equation's left side at least should make sense to you. It expresses
the significant concept of a relative rate, like~10 percent annual
+the significant concept of a proportional rate, like~10 percent annual
interest on a bond.
% 
@@ 883,10 +1569,24 @@ If~$f$ is a function of~$w$, which itsel
Equation~(\ref{drvtv:chain}) is the \emph{derivative chain rule.}%
\footnote{
It bears emphasizing to readers who may inadvertently have picked up
 unhelpful ideas about the Leibnitz notation in the past: the~$dw$ factor
 in the denominator cancels the~$dw$ factor in the numerator;
 a thing divided by itself is~1. That's it. There is nothing more to
 the proof of the derivative chain rule than that.
+ unhelpful ideas about the Leibnitz notation in the past: the~$dw$
+ factor in the denominator cancels the~$dw$ factor in the numerator,
+ and a thing divided by itself is~1. On an applied level, this more or
+ less is all there is to it (but see \S~\ref{drvtv:240}). Other than
+ maybe in degenerate cases like~$dw=0$, cases the applied mathematician
+ will treat individually as they come, there is hardly more to the
+ applied proof of the derivative chain rule than this (but
+ see~\cite[Prob.~3.39]{Spiegel}).%
+ %
+ % Am not sure that the rest of this footnote helps. Let's try
+ % commenting it out.
+ %Actually, it is not to $dw=0$ as
+ %such that the objection is raised, but the very notion of the
+ %infinitesimal~$dw$ troubles some Cantorian mathematicians. They have
+ %their reasons, and they tend to prefer to regard the
+ %infinitesimal~$dw$ merely as a convenient informalityas a shorthand
+ %to an implied limit, which of course it isbut this book will not
+ %worry overmuch about such as these.
}
\subsection{The derivative product rule}
@@ 912,9 +1612,11 @@ so, in the limit,
\prod_j \left(f_j(z) + \frac{df_j}{2}\right)
 \prod_j \left(f_j(z)  \frac{df_j}{2}\right).
\]
Since the product of two or more~$df_j$ is negligible compared to the
firstorder infinitesimals to which they are added here, this simplifies
to
+Since the product of two or more~$df_j$ is negligible
+compared to the firstorder infinitesimals to which they are here added,%
+\footnote{Unless~$df_j \approx 0$ to first order, in which case it
+ contributes nothing to the derivative, anyway.}
+this simplifies to
\[
d\left[\prod_j f_j(z)\right] =
\left[ \prod_j f_j(z) \right] \left[\sum_k \frac{df_k}{2f_k(z)} \right]
@@ 932,11 +1634,25 @@ In the common case of only two~$f_j$, th
On the other hand, if $f_1(z) = f(z)$ and $f_2(z) = 1/g(z)$, then by the
derivative chain rule~(\ref{drvtv:chain}), $df_2 = -dg/g^2$; so,
\bq{drvtv:proddiv}
 d\left(\frac f g\right) = \frac{g\,df - f\,dg}{g^2}.
+ d\left(\frac f g\right) = \frac{g\,df - f\,dg}{g^2},
+\eq
+and indeed
+\bq{drvtv:proddiv2}
+ d\left(\frac{f^a}{g^b}\right)
+ = \frac{f^{a-1}}{g^{b+1}}(ag\,df-bf\,dg).
+\eq
+Similarly,
+\bq{drvtv:prod4}
+ \begin{split}
+ d\left(f_1^{a_1}f_2^{a_2}\right) &=
+ \Big(f_1^{a_1-1}f_2^{a_2-1}\Big)(a_1f_2\,df_1+a_2f_1\,df_2)\\
+ &=
+ \Big(f_1^{a_1}f_2^{a_2}\Big)\left(\frac{a_1\,df_1}{f_1}+\frac{a_2\,df_2}{f_2}\right).
+ \end{split}
\eq
Equation~(\ref{drvtv:prod}) is the \emph{derivative product rule.}
After studying the complex exponential in Ch.~\ref{cexp}, we shall stand
+After studying the complex exponential in chapter~\ref{cexp}, we shall stand
in a position to write~(\ref{drvtv:prod}) in the slightly specialized but often
useful form%
\footnote{
@@ 981,12 +1697,17 @@ variable~$z$ is
\frac{d}{dz}[z^af(z)] = z^a\frac{df}{dz} + az^{a1}f(z).
\]
Swapping the equation's left and right sides then dividing through
by~$z^a$ yields
+by~$z^a$ yields that
\bq{drvtv:250:35}
\frac{df}{dz} + a\frac{f}{z} = \frac{d(z^af)}{z^a \,dz},
\eq
a pattern worth committing to memory, emerging among other places in
\S~\ref{vcalc:440}.
+\S~\ref{vcalc:440}.%
+\footnote{\label{drvtv:250:fn1}%
+ This section completes the forward reference of
+ \S~\ref{alggeo:228.40}. See chapter~\ref{alggeo}'s
+ footnote~\ref{alggeo:228:fn2}.
+}
% 
@@ 1051,14 +1772,14 @@ downward, then negative. But the deriva
derivative of the derivative, or second derivative. Hence if $df/dx=0$
at $x=x_o$, then
\bqb
 \left.\frac{d^2f}{dx^2}\right|_{x=x_o} &>& 0
+ \left.\frac{d^2\!f}{dx^2}\right|_{x=x_o} &>& 0
\ \ \mbox{implies a local minimum at~$x_o$;} \\
 \left.\frac{d^2f}{dx^2}\right|_{x=x_o} &<& 0
+ \left.\frac{d^2\!f}{dx^2}\right|_{x=x_o} &<& 0
\ \ \mbox{implies a local maximum at~$x_o$.}
\eqb
Regarding the case
\[
 \left.\frac{d^2f}{dx^2}\right|_{x=x_o} = 0,
+ \left.\frac{d^2\!f}{dx^2}\right|_{x=x_o} = 0,
\]
this might be either a minimum or a maximum but more probably is
neither, being rather a \emph{level inflection point} as depicted in
@@ 1067,7 +1788,7 @@ Fig.~\ref{drvtv:255:fig2}.%
Of course if the first and second derivatives are zero not just at
$x=x_o$ but everywhere, then $f(x) = y_o$ is just a level straight
line, but you knew that already. Whether one chooses to call some
 random point on a level straight line an inflection point or an
+ arbitrary point on a level straight line an inflection point or an
extremum, or both or neither, would be a matter of definition, best
established not by prescription but rather by the needs of the model
at hand.
@@ 1118,7 +1839,7 @@ that
\lim_{z\ra z_o} \frac{f(z)}{g(z)}
 = \left. \frac{df/dz}{dg/dz} \right|_{z=z_o}.
\eq
In the case where $z=z_o$ is a root, l'H\^opital's rule is
+In the case in which $z=z_o$ is a root, l'H\^opital's rule is
proved by reasoning%
\footnote{
Partly with reference to
@@ 1133,7 +1854,7 @@ proved by reasoning%
= \lim_{z\ra z_o} \frac{df}{dg}
= \lim_{z\ra z_o} \frac{df/dz}{dg/dz}.
\eqb
In the case where $z=z_o$ is a pole, new functions $F(z)\equiv 1/f(z)$
+In the case in which $z=z_o$ is a pole, new functions $F(z)\equiv 1/f(z)$
and $G(z)\equiv 1/g(z)$ of which $z=z_o$ is a root are defined, with
which
\[
@@ 1156,8 +1877,8 @@ Inverting,
= \lim_{z\ra z_o} \frac{df/dz}{dg/dz}.
\]
And if~$z_o$ itself is infinite? Then, whether it represents a root or
a pole, we define the new variable $Z=1/z$ and the new functions
$\Phi(Z)=f(1/Z)=f(z)$ and $\Gamma(Z)=g(1/Z)=g(z)$, with which we
+a pole, we define the new variable $Z\equiv 1/z$ and the new functions
+$\Phi(Z)\equiv f(1/Z)=f(z)$ and $\Gamma(Z)\equiv g(1/Z)=g(z)$, with which we
apply l'H\^opital's rule for $Z\ra 0$ to obtain
\bqb
\lefteqn{
@@ 1188,10 +1909,14 @@ should the occasion arise.%
point let us apply l'H\^opital's rule instead, reducing the ratio
to $\lim_{x\ra 0} 2(x^3+x)(3x^2+1)/2x$, which is still $0/0$.
Applying l'H\^opital's rule again to the result yields $\lim_{x\ra 0}
 2[(3x^2+1)^2+(x^3+x)(6x)]/2 = 2/2 = 1$. Where expressions involving
 trigonometric or special functions (Chs.~\ref{trig}, \ref{cexp} and
+ 2[(3x^2+1)^2+(x^3+x)(6x)]/2 = 2/2 = 1$.
% diagn
 [not yet written])
+ %Where expressions involving trigonometric or special functions
+ %(chapters~\ref{trig}, \ref{cexp} and [not yet written])
+ Where expressions involving trigonometric functions
+ (chapters~\ref{trig} and \ref{cexp})
+ or special functions
+ (mentioned in part~\ref{part30})
appear in ratio, a recursive application of l'H\^opital's rule can be
just the thing one needs.
@@ 1201,17 +1926,15 @@ should the occasion arise.%
}
\index{indeterminate form}
L'H\^opital's rule is used in evaluating indeterminate forms of the
kinds
% bad break
+L'H\^opital's rule is used in evaluating indeterminate forms of the kinds
$0/0$ and $\infty/\infty$, plus related forms like $(0)(\infty)$
which can be recast into either of the two main forms. Good examples of
the use require math from Ch.~\ref{cexp} and later, but if we may borrow
+the use require mathematics from chapter~\ref{cexp} and later, but if we may borrow
from~(\ref{cexp:225:dln}) the natural logarithmic function and its
derivative,%
\footnote{
This paragraph is optional reading for the moment. You can read
 Ch.~\ref{cexp} first, then come back here and read the paragraph if
+ chapter~\ref{cexp} first, then come back here and read the paragraph if
you prefer.
}
\[
@@ 1245,7 +1968,7 @@ broadly applicable method for finding ro
function $f(z)$ of which the root is desired, the NewtonRaphson
iteration is
\bq{drvtv:NR}
 z_{k+1} = \left. z - \frac{f(z)}{\frac{d}{dz}f(z)}\right|_{z=z_k}.
+ z_{k+1} = \left[ z - \frac{f(z)}{\frac{d}{dz}f(z)}\right]_{z=z_k}.
\eq
One begins the iteration by guessing the root and calling the guess~$z_0$.
Then~$z_1$, $z_2$, $z_3$, etc., calculated in turn by the
@@ 1290,7 +2013,7 @@ The iteration approximates the curve $f(
Fig.~\ref{drvtv:270:fig1} is a good example of a tangent line.
The relationship between the tangent line and the trigonometric
 tangent function of Ch.~\ref{trig} is slightly obscure, maybe more of
+ tangent function of chapter~\ref{trig} is slightly obscure, maybe more of
linguistic interest than of mathematical. The trigonometric tangent
function is named from a variation on Fig.~\ref{trig:226:f1} in which
the triangle's bottom leg is extended to unit length, leaving the
@@ 1308,7 +2031,7 @@ f_k(x_{k+1}) = 0$:
\]
Solving for $x_{k+1}$, we have that
\[
 x_{k+1} = \left. x - \frac{f(x)}{\frac{d}{dx}f(x)}\right|_{x=x_k},
+ x_{k+1} = \left[ x_k - \frac{f(x)}{\frac{d}{dx}f(x)}\right]_{x=x_k},
\]
which is~(\ref{drvtv:NR}) with $x\la z$.
@@ 1358,7 +2081,7 @@ hardly departs from the straight line.
\index{square root!calculation of by NewtonRaphson}
\index{$n$th root!calculation of by NewtonRaphson}
The NewtonRaphson iteration is a champion square root calculator,
+The NewtonRaphson iteration is a champion squareroot calculator,
incidentally. Consider
\[
 f(x) = x^2 - p,
diff pruN 0.53.201204142/tex/eigen.tex 0.56.20180123.12/tex/eigen.tex
 0.53.201204142/tex/eigen.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/eigen.tex 20180123 03:24:36.000000000 +0000
@@ 14,7 +14,7 @@ This chapter analyzes the eigenvalue and
\emph{eigenvector} it scales.
Before treating the eigenvalue proper, the chapter gathers from
across Chs.~\ref{matrix} through~\ref{eigen} several properties all
+across chapters~\ref{matrix} through~\ref{eigen} several properties all
invertible square matrices share, assembling them in \S~\ref{eigen:370}
for reference. One of these regards the \emph{determinant,} which opens
the chapter.
@@ 25,7 +25,7 @@ the chapter.
\label{eigen:310}
\index{determinant}
Through Chs.~\ref{matrix}, \ref{gjrank} and~\ref{mtxinv} the theory of
+Through chapters~\ref{matrix}, \ref{gjrank} and~\ref{mtxinv} the theory of
the matrix has developed slowly but pretty straightforwardly. Here comes the
first unexpected turn.
@@ 126,7 +126,7 @@ the determinant has exactly~$n!$ terms.
\S\S~\ref{matrix:180.22} and~\ref{matrix:321} and
eqn.~\ref{matrix:321:20}.%
\footnote{
 And further Ch.~\ref{mtxinv}'s footnotes~\ref{mtxinv:245:08}
+ And see further chapter~\ref{mtxinv}'s footnotes~\ref{mtxinv:245:08}
and~\ref{mtxinv:450:08}.
}%
)
@@ 135,13 +135,14 @@ It is admitted%
\footnote{\cite[\S~1.2]{Franklin}}
that we have not, as yet,
actually shown the determinant to be a generally useful quantity; we have
merely motivated and defined it. Historically the determinant probably
+merely motivated and defined it. The true history of the determinant is
+unknown to this writer, but one might suppose that the determinant had originally
emerged not from abstract considerations but for the mundane reason that
the quantity it represents occurred frequently in practice (as in
the~$A_2^{1}$ example above). Nothing however logically prevents one
+the quantity it represents occurs frequently in practice (as in
+the~$A_2^{1}$ of the example above). Nothing however logically prevents one
from simply defining some quantity which, at first, one merely
suspects will later prove useful. So we do here.%
\footnote{\cite[Ch.~1]{Franklin}}
+\footnote{\cite[chapter~1]{Franklin}}
\subsection{Basic properties}
\label{eigen:310.25}
@@ 633,30 +634,30 @@ $4 \times 4$ to $6 \times 6$ or so its s
grow too great and too many for practical calculation. The GaussJordan
technique (or even the GramSchmidt technique) is preferred to invert
concrete matrices above a certain size for this reason.%
\footnote{
+\footnote{%
For very large matrices, even the GaussJordan grows impractical, due
to compound floatingpoint rounding error and the maybe large but
nonetheless limited quantity of available computer memory. Iterative
 techniques
+ techniques,
% diagn
 ([chapter not yet written])
 serve to invert such matrices approximately.
+ regrettably beyond this edition's scope,
+ serve to invert such matrices approximately.%
}
% 
\section{Coincident properties}
+\section{Co\"incident properties}
\label{eigen:370}
\index{coincident properties of the square matrix}
\index{square matrix!coincident properties of}
\index{matrix!square, coincident properties of}
+\index{co\"incident properties of the square matrix}
+\index{square matrix!co\"incident properties of}
+\index{matrix!square, co\"incident properties of}
\index{exact arithmetic}
\index{arithmetic!exact}
Chs.~\ref{matrix}, \ref{gjrank} and~\ref{mtxinv}, plus this chapter up
to the present point, have discovered several coincident properties of
+Chapters~\ref{matrix}, \ref{gjrank} and~\ref{mtxinv}, plus this chapter up
+to the present point, have discovered several co\"incident properties of
the invertible $n \times n$ square matrix. One does not feel the full
impact of the coincidence when these properties are left scattered
+impact of the co\"incidence when these properties are left scattered
across the long chapters; so, let us gather and summarize the properties
here. A square, $n \times n$ matrix evidently has either all of the
following properties or none of them, never some but not others.
@@ 701,7 +702,7 @@ The square matrix which has one of these
The square matrix which lacks one, lacks all. Assuming exact
arithmetic, a square matrix is either invertible, with all that that
implies, or singular; never both. The distinction between invertible
and singular matrices is theoretically as absolute as (and indeed is
+and singular matrices is theoretically as absolute as (and is indeed
analogous to) the distinction between nonzero and zero scalars.
\index{matrix!illconditioned}
@@ 714,7 +715,7 @@ other ways, by its unexpectedly small de
exact arithmetic, a nonzero determinant, no matter how small, implies a
theoretically invertible matrix. Practical matrices however often have
entries whose values are imprecisely known; and even when they don't,
the computers which invert them tend to do arithmetic imprecisely in
+the computers that invert them tend to do arithmetic imprecisely in
floatingpoint. Matrices which live on the hazy frontier between
invertibility and singularity resemble the infinitesimals of
\S~\ref{drvtv:210.001}. They are called \emph{illconditioned}
@@ 781,10 +782,8 @@ matrix. This is what~(\ref{eigen:eigen}
only when~$\ve v$ happens to be the right \emph{eigenvector,} which
\S~\ref{eigen:420} discusses.
% diagn: this paragraph plus the first sentence of the next want more
% review.
Observe incidentally that the characteristic polynomial of an $n\times
n$ matrix always enjoys full order~$n$, regardless of the matrix's rank.
+n$ matrix always enjoys full order~$n$ regardless of the matrix's rank.
The reason lies in the determinant $\det(A-\lambda I_n)$, which
comprises exactly~$n!$ determinantterms (we say ``determinantterms''
rather than ``terms'' here only to avoid confusing the determinant's
@@ 820,14 +819,17 @@ Naturally one must solve~(\ref{eigen:eig
to locate the actual eigenvalues. One solves it by the same techniques
by which one solves any polynomial: the quadratic
formula~(\ref{alggeo:240:quad}); the cubic and quartic methods of
Ch.~\ref{cubic}; the NewtonRaphson iteration~(\ref{drvtv:NR}). On the
+chapter~\ref{cubic}; the NewtonRaphson iteration~(\ref{drvtv:NR}). On the
other hand, the determinant~(\ref{eigen:eigdet}) can be impractical to
expand for a large matrix; here iterative techniques help: see
% diagn
[chapter not yet written].%
\footnote{
+expand for a large matrix;
+here iterative techniques\footnote{%
+ % diagn
+ Such iterative techniques are regrettably not treated by this
+ edition.%
+}
+help.\footnote{%
The inexpensive~\cite{Franklin} also covers the topic competently and
 readably.
+ readably.%
}
% 
@@ 848,8 +850,8 @@ the $n$element vectors for which
which is to say that the eigenvectors are the vectors of the kernel
space of the degenerate matrix $[A-\lambda I_n]$---which one can
calculate (among other ways) by the GaussJordan kernel
formula~(\ref{mtxinv:245:kernel}) or the GramSchmidt kernel
formula~(\ref{mtxinv:460:kernel}).
+formula~(\ref{mtxinv:245:kernel}) or by a method
+exploiting~(\ref{mtxinv:450:perp}).
\index{eigensolution}
An eigenvalue and its associated eigenvector, taken together, are
@@ 895,7 +897,7 @@ among them the following.
\[
 \ve v_k = \sum_{j=1}^{k-1} c_j \ve v_j,
\]
  then left-multiplying the equation by $A-\lambda_k$ would yield
+ then left-multiplying the equation by $A-\lambda_kI_n$ would yield
\[
 0 = \sum_{j=1}^{k-1} (\lambda_j-\lambda_k)c_j \ve v_j,
\]
@@ 912,7 +914,7 @@ among them the following.
$n$element vector can be expressed as a unique linear combination
of the eigenvectors.} This is a simple consequence of the fact that
the $n \times n$ matrix~$V$ whose columns are the several
 eigenvectors~$\ve v_j$ has full rank $r=n$. Unfortunately some
+ eigenvectors~$\ve v_j$ has full rank $r=n$. Unfortunately, some
matrices with repeated eigenvalues also have repeated
eigenvectorsas for example, curiously,%
\footnote{\cite{math21b}}
@@ 981,7 +983,7 @@ where
}
\]
is an otherwise empty $n \times n$ matrix with the eigenvalues of~$A$
set along its main diagonal and
+set along its main diagonal and where
\[
V = \left[
\br{ccccc}
@@ 993,15 +995,12 @@ is an $n \times n$ matrix whose columns
This is so because the identity $A\ve v_j = \ve v_j \lambda_j$
holds for all $1 \le j \le n$; or, expressed more concisely, because the
identity
\[
+\bq{eigen:423:25}
AV = V\Lambda
\]
holds.%
\footnote{
 If this seems confusing, then consider that the $j$th column of the
 product~$AV$ is~$A\ve v_j$, whereas the $j$th column of~$\Lambda$
 having just the one element acts to scale $V$'s $j$th column only.
}
+\eq
+holds (reason: the $j$th column of the
+product~$AV$ is~$A\ve v_j$, whereas the $j$th column of~$\Lambda$
+having just the one element acts to scale $V$'s $j$th column only).
The matrix~$V$ is invertible because its columns the eigenvectors are
independent, from which~(\ref{eigen:diag}) follows.
Equation~(\ref{eigen:diag}) is called the \emph{eigenvalue
@@ 1072,7 +1071,7 @@ nondiagonalizable matrix vaguely resembl
both represent edge cases and can be hard to handle numerically; but the
resemblance ends there, and a matrix can be either without being the
other. The $n \times n$ null matrix for example is singular but still
diagonalizable. What a nondiagonalizable matrix is in essence is a
+diagonalizable. What a nondiagonalizable matrix is, is, in essence, a
matrix with a repeated eigensolution: the same eigenvalue with the same
eigenvector, twice or more. More formally, a nondiagonalizable matrix
is a matrix with an $n$fold eigenvalue whose corresponding eigenvector
@@ 1088,7 +1087,7 @@ nondiagonalizable matrix.
\index{dominant eigenvalue}
\index{eigenvalue!dominant}
Eigenvalues and their associated eigenvectors stand among the principal
reasons one goes to the considerable trouble to develop matrix theory as
+causes that one should go to such considerable trouble to develop matrix theory as
we have done in recent chapters. The idea that a matrix resembles a
humble scalar in the right circumstance is powerful. Among the reasons
for this is that a matrix can represent an iterative process, operating
@@ 1103,7 +1102,7 @@ eigenvalues of the matrix which describe
case of the nondiagonalizable matrix, which matrix surprisingly covers
only part of its domain with eigenvectors. All this is fairly deep
mathematics. It brings an appreciation of the matrix for reasons which
were anything but apparent from the outset of Ch.~\ref{matrix}.
+were anything but apparent from the outset of chapter~\ref{matrix}.
Remarks continue in \S\S~\ref{eigen:520.30} and~\ref{eigen:900}.
@@ 1376,9 +1375,6 @@ We derive it here for this reason.
\subsection{Derivation}
\label{eigen:520.10}
% At the moment this comment is written, it appears that this lengthy
% subsection merits no index entries. Interesting.

Suppose that%
\footnote{
This subsection assigns various capital Roman letters to represent the
@@ 1393,7 +1389,7 @@ Suppose that%
b$ as $T\ve e = \ve i$, not because the latter is wrong but because it
would be extremely confusing). The Roman alphabet provides only
twentysix capitals, though, of which this subsection uses too many to
 be allowed to reserve any. See Appendix~\ref{greek}.
+ be allowed to reserve any. See appendix~\ref{greek}\@.
}
(for some reason, which will shortly grow clear) we have a
matrix~$B$ of the form
@@ 1496,7 +1492,15 @@ from~(\ref{eigen:520:22}), we have by su
+ H_{i}W_o^{*}H_{i}BH_{i}W_oH_{i}.
\eqb
The unitary submatrix~$W_o$ has only $n-i$ columns and $n-i$ rows, so
$I_{n-i}W_o = W_o = W_oI_{n-i}$. Thus,
+$I_{n-i}W_o = W_o = W_oI_{n-i}$. Thus,\footnote{%
+ The algebra is so thick that, even if one can logically follow it, one
+ might nonetheless wonder how the writer had thought to write it.
+ However, much of the algebra consists of crop-and-shift operations
+ like $H_iI_{n-i}$ which, when a sample matrix is sketched on a sheet
+ of paper, are fairly easy to visualize. Indeed, the whole derivation
+ is more visual than the inscrutable symbols let on. The writer
+ had the visuals in mind.%
+}
\bqb
C &=&
I_{i}BI_{i}
@@ 1548,12 +1552,12 @@ reduced requirement that
(I_n-I_i)W^{*}BW(I_n-I_i).
\eq
Further left-multiplying by~$H_{-i}$, right-multiplying by~$H_i$,
and applying the identity~(\ref{matrix:340:61}) yields
+and applying the identity~(\ref{matrix:340:61}) yields that
\bqb
 I_{n-i}H_{-i}CH_{i}I_{n-i}
 = I_{n-i}H_{-i}W^{*}BWH_{i}I_{n-i};
\eqb
or, substituting from~(\ref{eigen:520:25}),
+or, substituting from~(\ref{eigen:520:25}), that
\[
 C_o = I_{n-i}H_{-i}W^{*}BWH_{i}I_{n-i}.
\]
@@ 1572,7 +1576,7 @@ or, since $I_{ni}H_{i}I_{i} = 0 = I_iH
\\&=&
 W_o^{*}I_{n-i}H_{-i}BH_{i}I_{n-i}W_o.
\eqb
Per~(\ref{eigen:520:25}), this is
+Per~(\ref{eigen:520:25}), this has that
\bq{eigen:520:40}
C_o = W_o^{*}B_oW_o.
\eq
@@ 1761,7 +1765,7 @@ diagonal of~$U_S$.}%
= \det [U_S  \lambda I_n],
\]
which says that~$A$ and~$U_S$ have not only the same eigenvalues
 but also the same characteristic polynomials, thus further the
+ but also the same characteristic polynomials, and thus further the
same eigenvalue multiplicities.
}
@@ 1794,7 +1798,7 @@ of~$U_S$ to%
where $\ep \ll 1$ and where~$\ve u$ is an arbitrary vector that meets
the criterion given. Though infinitesimally near~$A$, the modified
matrix $A' = QU_S'Q^{*}$ unlike~$A$ has~$n$ (maybe infinitesimally)
distinct eigenvalues. With sufficient toil, one can analyze such
+distinct eigenvalues. With sufficient toil, one might analyze such
perturbed eigenvalues and their associated eigenvectors similarly as
\S~\ref{inttx:260.20} has analyzed perturbed poles.
@@ 1808,7 +1812,7 @@ of the subsection's title. Section~\ref
diagonalization formula~(\ref{eigen:diag}) diagonalize any matrix with
distinct eigenvalues and even any matrix with repeated eigenvalues but
distinct eigenvectors, but fail where eigenvectors repeat.
Equation~(\ref{eigen:520:75}) separates eigenvalues, thus also
+Equation~(\ref{eigen:520:75}) separates eigenvalues, and thus also
eigenvectors---for according to \S~\ref{eigen:422} eigenvectors of
distinct eigenvalues never depend on one another---permitting a
nonunique but still sometimes usable form of diagonalization in the
@@ 1836,10 +1840,10 @@ answer in that form.
\index{Hessenberg matrix}
Generalizing the nondiagonalizability concept leads one eventually to
the ideas of the \emph{generalized eigenvector}%
\footnote{\cite[Ch.~7]{FriedbergIS}}
+\footnote{\cite[chapter~7]{FriedbergIS}}
(which solves the higherorder linear system $[A\lambda I]^k\ve v=0$)
and the \emph{Jordan canonical form,}%
\footnote{\cite[Ch.~5]{Franklin}}
+\footnote{\cite[chapter~5]{Franklin}}
which together roughly track the sophisticated conventional
poleseparation technique of \S~\ref{inttx:260.50}.
Then there is a kind of sloppy Schur form called a Hessenberg form which
@@ 2037,7 +2041,7 @@ $\ve w \neq 0$, we would have by success
\eqb
The last equation claims that $(\mu, V_o^\perp\ve w)$ were an
eigensolution of~$A$, when we had supposed that all of $A$'s
eigenvectors lay in the space addressed by the columns of~$V_o$, thus by
+eigenvectors lay in the space addressed by the columns of~$V_o$, and thus by
construction did not lie in the space addressed by the columns of~$V_o^\perp$. The
contradiction proves false the assumption that gave rise to it. The
assumption: that a nondiagonalizable Hermitian~$A$ existed. We conclude
@@ 2066,7 +2070,7 @@ That is, $A^{*} = A$ as was to be demons
diagonalizable matrices with real eigenvalues and orthogonal
eigenvectors are Hermitian.}
This section brings properties that greatly simplify many kinds of
+This section brings properties that simplify many kinds of
matrix analysis. The properties demand a Hermitian matrix, which might
seem a severe and unfortunate restriction---except that one can
leftmultiply any exactly determined linear system $C\ve x = \ve d$
@@ 2099,7 +2103,7 @@ obtain.%
Occasionally an elegant idea awaits discovery, overlooked, almost in
plain sight. If the unlikely thought occurred to you to take the square
root of a matrix, then the following idea is one you might discover.%
\footnote{\cite[``Singular value decomposition,'' 14:29, 18~Oct. 2007]{wikip}}
+\footnote{\cite[``Singular value decomposition,'' 14:29, 18~Oct.\ 2007]{wikip}}
\index{singularvalue matrix}
\index{matrix!singularvalue}
@@ 2141,7 +2145,7 @@ of the eigenvalue matrix~$\Lambda$ such
}, \xn
\eqa
where the \emph{singular values} of~$A$ populate $\Sigma$'s diagonal.
Applying~(\ref{eigen:600:15}) to~(\ref{eigen:600:10}) then yields
+Applying~(\ref{eigen:600:15}) to~(\ref{eigen:600:10}) then yields that
\bq{eigen:600:20}
\begin{split}
A^{*}A &= V\Sigma^{*}\Sigma V^{*}, \\
diff pruN 0.53.201204142/tex/fouri.tex 0.56.20180123.12/tex/fouri.tex
 0.53.201204142/tex/fouri.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/fouri.tex 20180123 23:40:02.000000000 +0000
@@ 6,11 +6,14 @@
\index{Fourier, Jean Baptiste Joseph\\(1768--1830)} % bad break
\index{Laplace, Pierre-Simon (1749--1827)}
The Fourier series of Ch.~\ref{fours} though quite useful applies solely to
waveforms that repeat. An effort to extend the Fourier series to
the broader domain of nonrepeating waveforms leads to the \emph{Fourier
transform,} this chapter's chief subject. [This chapter is yet only a
rough draft.]
+The Fourier series of chapter~\ref{fours} is most useful. Its
+applications are extensive. However, the Fourier series applies solely
+to waveforms that repeat---or, at most, to waveforms that can be framed
+as though they repeated.
+
+An effort to extend the Fourier series to the broader domain of
+nonrepeating waveforms leads to the \emph{Fourier transform,} this
+chapter's chief subject.
% 
@@ 19,10 +22,7 @@ rough draft.]
\index{Fourier transform}
\index{transform!Fourier}
This section derives and presents the Fourier transform, extending the
% bad break
\linebreak
Fourier series.
+This section extends the Fourier series to derive the Fourier transform.
\subsection{Fourier's equation}
\label{fouri:100.10}
@@ 84,20 +84,20 @@ following way. First, convert the pulse
\[
g(t) \equiv \sum_{n=-\infty}^\infty f(t-nT_1),
\]
which naturally does repeat.%
\footnote{
+which naturally does repeat,%
+\footnote{%
One could divert rigorously from this point to consider formal
 requirements against $f(t)$ but it suffices that $f(t)$ be
 sufficiently limited in extent that $g(t)$ exist for all $\Re(T_1)
+ requirements against $f(t)$ but for applications it probably suffices that
+ $f(t)$ be limited enough in extent that $g(t)$ exist for all $\Re(T_1)
> 0$, $\Im(T_1) = 0$. Formally, such a condition would forbid a
 function like $f(t) = A \cos \omega_o t$, but one can evade this
+ function like $f(t) = A \cos \omega_o t$, but one can evade the
formality, among other ways, by defining the function as $f(t) =
 \lim_{T_2\ra\infty} \Pi(t/T_2) A \cos \omega_o t$, where $\Pi(t)$ is
 the rectangular pulse of~(\ref{fours:095:10}).

 We will leave to the professionals further consideration of
 formal requirements.
+ \lim_{T_2\ra\infty} \Pi(t/T_2) A \cos \omega_o t$, the $\Pi(\cdot)$
+ being the rectangular pulse of~(\ref{fours:095:10}).
+ %even if (as would probably be the case) $T_2 \gg T_1$.
+ Other pulses of \S~\ref{fours:095} might suit, as well.%
}
+where~$T_1 > 0$ is an arbitrary period of repetition whose value you can choose.
Second, by~(\ref{fours:100:15}), calculate the Fourier coefficients of
this pulse train $g(t)$. Third, use these coefficients in the Fourier
series~(\ref{fours:100:10}) to reconstruct
@@ 125,8 +125,11 @@ the train the original pulse
\,d\tau
\right]
e^{ij \,\Delta\omega\, t}
 \right\};
+ \right\}
\]
+[in which we have replaced $g(\tau)$ by $f(\tau)$, supposing that $T_1$
+has grown great enough to separate the several instances $f(\tau-nT_1)$
+of which, according to definition, $g(\tau)$ is composed];
or, observing per~(\ref{fours:080:08}) that $\Delta\omega\,T_1 = 2\pi$
and reordering factors,
\[
@@ 141,7 +144,7 @@ and reordering factors,
\,d\tau
\right] \,\Delta\omega.
\]
Fifth, defining the symbol $\omega \equiv j\,\Delta\omega$ observe
+Fifth, defining the symbol $\omega \equiv j\,\Delta\omega$, observe
that the summation is really an integration in the limit, such that
\bq{fouri:eqn}
f(t) =
@@ 164,10 +167,11 @@ result.
\index{Fourier transform!inverse}
The reader may agree that Fourier's equation~(\ref{fouri:eqn}) is
curious, but in what way is it remarkable? To answer, let us observe
that the quantity in (\ref{fouri:eqn})'s square braces,
+curious, but in what way is it remarkable? the reader might ask.
+To answer, let us observe
+that the quantity within the square braces of~(\ref{fouri:eqn}),
\bq{fouri:xform}
 F(\omega) = \mathcal{F}\left\{f(t)\right\} \equiv
+ F(\omega) \equiv \mathcal{F}\left\{f(t)\right\} \equiv
\frac{1}{\sqrt{2\pi}}
\int_{-\infty}^{\infty}
e^{-i\omega\tau}
@@ 176,10 +180,10 @@ that the quantity in (\ref{fouri:eqn})'s
\eq
is a function not of~$t$ but rather of~$\omega$. We conventionally give
this function the capitalized symbol $F(\omega)$ and name it the
\emph{Fourier transform} of $f(t)$, introducing also the useful
notation $\mathcal{F}\{\cdot\}$ (where the script letter~$\mathcal{F}$
stands for ``Fourier'' and is only coincidentally, unfortunately, the
same letter here as~$f$ and~$F$) as a short form to represent the
+\emph{Fourier transform} of $f(t)$, introducing also the
+notation $\mathcal{F}\{\cdot\}$ (where the script letter~$\mathcal{F}$,
+which stands for ``Fourier,'' is only accidentally the same letter
+as~$f$ and~$F$) as a short form to represent the
transformation~(\ref{fouri:xform}) serves to define.
Substituting~(\ref{fouri:xform}) into~(\ref{fouri:eqn}) and changing
$\eta \la \omega$ as the dummy variable of integration, we have that
@@ 203,10 +207,10 @@ transform's complementary equations~(\re
and~(\ref{fouri:xform}) are but continuous versions of the earlier
complementary equations~(\ref{fours:100:10}) and~(\ref{fours:100:15}) of
the discrete Fourier series. The transform finds even wider application
than does the series.%
+than the series does.%
\footnote{
Regrettably, several alternate definitions and usages of the Fourier
 series are broadly current in the writer's country alone. Alternate
+ series are current. Alternate
definitions~\cite{Phillips/Parr}\cite{Couch} handle the factors of
$1/\sqrt{2\pi}$ differently. Alternate usages~\cite{Feynman} change
$i\la i$ in certain circumstances. The essential Fourier mathematics
@@ 283,7 +287,7 @@ Fig.~\ref{fouri:100:fig}.
\subsection{The complementary variables of transformation}
\label{fouri:100.30}
\index{transformation, variable of}
+\index{transformation!variable of}
\index{variable of transformation}
\index{complementary variables of transformation, the}
@@ 303,9 +307,9 @@ mutually independent variables~$\omega$
\index{Fourier transform pair}
\index{transform pair}
Formally, one can use any two letters in place of~$t$
and~$\omega$; and indeed one need not even use two different letters,
for it is sometimes easier just to write
+Formally, one can use any two letters in place of the~$\omega$
+and~$t$; and indeed one need not even use two different letters,
+for it is sometimes easier just to write,
\bq{fouri:byu}
\begin{split}
F(v) = \mathcal{F}\left\{f(v)\right\} &=
@@ 326,7 +330,8 @@ for it is sometimes easier just to write
in which the~$\theta$ is in itself no variable of transformation but
only a dummy variable. To emphasize the distinction between the
untransformed and transformed (respectively typically time and
frequency) domains, however, one can instead write
+frequency) domains, however, scientists and engineers tend to
+style~(\ref{fouri:byu}) as
\bq{fouri:byu2}
\begin{split}
F(\omega) = \mathcal{F}\left\{f(t)\right\} &=
@@ 344,20 +349,18 @@ frequency) domains, however, one can ins
\mathcal{F} &\equiv \mathcal{F}_{\omega t},
\end{split}
\eq
where~(\ref{fouri:byu2}) is just~(\ref{fouri:xform})
and~(\ref{fouri:invxform}) together with appropriate changes of dummy
variable. Notice here the usage of the symbol~$\mathcal F$,
incidentally. As clarity demands, one can elaborate
the~$\mathcal{F}$---here or wherever else it
appears---as~$\mathcal{F}_{vv}$, $\mathcal{F}_{\omega t}$ or the like to
identify the complementary variables of transformation explicitly.
The unadorned symbol~$\mathcal{F}$ however usually acquits itself
clearly enough in context (refer to \S~\ref{hex:270.2}).
+which are just~(\ref{fouri:xform})
+and~(\ref{fouri:invxform}) together with their dummy variables
+\linebreak % bad break
+changed.
+(For precision of specification, one can affix subscripts as shown:
+$\mathcal F_{vv}$; $\mathcal F_{\omega t}$. However, the
+unadorned~$\mathcal{F}$ is normally clear enough in context.)
Whichever letter or letters might be used for the independent variable,
the functions
\bq{fouri:100:30}
 f(v) \stackrel{\mathcal F}{\ra} F(v)
+ f(v) \fouripair F(v)
\eq
constitute a \emph{Fourier transform pair.}
@@ 384,10 +387,9 @@ to~(\ref{fouri:byu}) is
\int_{0}^{1} e^{-iv\theta} (1-\theta) \,d\theta
\right\}.
\eqb
According to Table~\ref{inttx:470:tbl} (though it is easy enough to
figure it out without recourse to the table),
$\theta e^{-iv\theta} = [d/d\theta] [ e^{-iv\theta}(1+iv\theta)/v^2 ]$; so,
continuing,
+Evaluating the integrals according to Table~\ref{inttx:470:tbl}'s
+antiderivative that $\theta e^{-iv\theta} = [d/d\theta] [
+e^{-iv\theta}(1+iv\theta)/v^2 ]$,
\bqb
\mathcal{F}\left\{\Lambda(v)\right\} &=&
\frac{1}{v^2\sqrt{2\pi}} \bigg\{
@@ 407,13 +409,18 @@ of~(\ref{fours:160:10}). Thus we find t
\index{square pulse!Fourier transform of}
\index{Fourier transform!of a square pulse}
One can compute other Fourier transforms in
like manner, such as that
\bq{fouri:100:44}
 \Pi(v) \fouripair \frac{\sinarg(v/2)}{\sqrt{2\pi}}.
\eq
and yet further transforms by the duality rule and the other properties
of \S~\ref{fouri:110}.
+One can compute other Fourier transform pairs in
+like manner, such as that\footnote{%
+ To verify~(\ref{fouri:100:44}) and~(\ref{fouri:100:45}) is left as
+ an exercise. Hint toward~(\ref{fouri:100:45}): $\sin(v\pm\pi) =
+ \sin v$.%
+}%
+\begin{eqnarray}
+ \Pi(v) &\fouripair& \frac{\sinarg(v/2)}{\sqrt{2\pi}},\label{fouri:100:44}\\
+ \Psi(v) &\fouripair& \frac{\sinarg v}{\sqrt{2\pi}\left[1(v/\pi)^2\right]}.\label{fouri:100:45}
+\end{eqnarray}
+One can compute yet further transform pairs by the duality rule and
+other properties of \S~\ref{fouri:110}.
% 
@@ 421,203 +428,22 @@ of \S~\ref{fouri:110}.
\label{fouri:110}
\index{Fourier transform!properties of}
The Fourier transform obeys an algebra of its own, exhibiting several
broadly useful properties one might grasp to wield the transform
effectively. This section derives and lists the properties.

\subsection{Duality}
\label{fouri:110.10}
\index{duality}
\index{Fourier transform!dual of}
+The Fourier transform obeys an algebra of its own, exhibiting properties
+the mathematician can exploit to extend the transform's reach. This
+section derives and lists several such properties.
\index{Fourier transform!reversing the independent variable of}
Changing $v \la -v$ makes (\ref{fouri:byu})'s second line to read
\[
 f(-v) =
 \frac{1}{\sqrt{2\pi}}
 \int_{-\infty}^{\infty}
 e^{-iv\theta}
 F(\theta)
 \,d\theta.
\]
However, according to (\ref{fouri:byu})'s first line, this says neither
more nor less than that
\bq{fouri:110:10}
 F(v) \stackrel{\mathcal F}{\ra} f(-v),
\eq
which is that the transform of the transform is the original function
with the independent variable reversed, an interesting and useful
property. It is entertaining,
%\footnote{
% At least for those of us who are easily entertained!
%}
and moreover enlightening, to combine~(\ref{fouri:100:30})
and~(\ref{fouri:110:10}) to form the endless transform progression
\bq{fouri:110:12}
 \cdots
 \stackrel{\mathcal F}{\ra} f(v)
 \stackrel{\mathcal F}{\ra} F(v)
 \stackrel{\mathcal F}{\ra} f(-v)
 \stackrel{\mathcal F}{\ra} F(-v)
 \stackrel{\mathcal F}{\ra} f(v)
 \stackrel{\mathcal F}{\ra} \cdots
\eq
%A sequence of four successive transformations apparently recovers the
%original function.%
%\footnote{
% Fourier transformation resembles multiplication by~$i$ in this
% respect. (Tangentially, see also Ch.~\ref{cubic}'s
% footnote~\ref{cubic:250:09}.)
%}
Equation~(\ref{fouri:110:10}), or alternately~(\ref{fouri:110:12}),
expresses the Fourier transform's \emph{duality} rule.

\index{compositional duality}
\index{duality!compositional}
\index{Fourier transform!compositional dual of}
The Fourier transform evinces duality in another guise too,
\emph{compositional duality,} expressed abstractly as
\bq{fouri:110:14}
 \begin{split}
 g[v,f(h_g(v))] &\fouripair G[v,F(h_G(v))], \\
 G[v,f(h_G(v))] &\fouripair g[v,F(h_g(v))].
 \end{split}
\eq
This is best introduced by example. Consider the Fourier pair
$\Lambda(v) \fouripair \sinarg^2[v/2]/\sqrt{2\pi}$ mentioned in
\S~\ref{fouri:100.40}, plus the Fourier identity $f(v-a) \fouripair
e^{-iav}F(v)$ which we have not yet met but will in
\S~\ref{fouri:110.30} below. Identifying $f(v)=\Lambda(v)$ and
$F(v)=\sinarg^2[v/2]/\sqrt{2\pi}$, the identity extends the pair to
$\Lambda(v-a) \fouripair e^{-iav}\sinarg^2[v/2]/\sqrt{2\pi}$.
On the other hand, recognizing
$h_g(v) = v-a$, $g[v,(\cdot)] = (\cdot)$, $h_G(v)=v$, and
$G[v,(\cdot)]=(e^{-iav})(\cdot)$, eqn.~(\ref{fouri:110:14}) converts
the identity to its compositional dual
$e^{-iav}f(v) \fouripair F(v+a)$, which in turn extends the pair to
$e^{-iav}\Lambda(v) \fouripair \sinarg^2[(v+a)/2]/\sqrt{2\pi}$. Note
incidentally that the direct dual of the original pair
per~(\ref{fouri:110:12}) is the pair $\sinarg^2[v/2]/\sqrt{2\pi}
\fouripair \Lambda(-v)$ which, since it happens that
$\Lambda(-v)=\Lambda(v)$, is just the pair $\sinarg^2[v/2]/\sqrt{2\pi}
\fouripair \Lambda(v)$; but that we need neither the identity
nor~(\ref{fouri:110:14}) to determine this.

\index{formal pair}
\index{Fourier transform pair!formal}
\index{transform pair!formal}
So, assuming that~(\ref{fouri:110:14}) is correct, it does seem useful;
but is it correct? To show that it is, take the direct dual on~$v$ of
(\ref{fouri:110:14})'s first line to get the formal pair
\[
 G[v,F(h_G(v))] \fouripair g[v,f(h_g(v))],
\]
then change the symbols $\phi \la f$ and $\Phi \la F$ to express the
same formal pair as
\bq{fouri:110:15}
 G[v,\Phi(h_G(v))] \fouripair g[v,\phi(h_g(v))].
\eq
Now, this as we said is merely a formal pair, which is to say that it
represents no functions in particular but presents a pattern to which
functions can be fitted. Therefore, $\phi([\cdot])$ might represent any
function so long as $\Phi([\cdot])$ were let to represent the same
function's Fourier transform on~$[\cdot]$, as%
\footnote{
 To be symbolically precise, the~$\mathcal F$ here is $\mathcal
 F_{[\cdot][\cdot]}$, such that
 \[
 \begin{split}
 \phi([\cdot]) &\stackrel{\mathcal{F}_{[\cdot][\cdot]}}{\rightarrow} \Phi([\cdot]), \\
 F([\cdot]) &\stackrel{\mathcal{F}_{[\cdot][\cdot]}}{\rightarrow} f([\cdot]);
 \end{split}
 \]
 whereas the~$\mathcal F$ in the formal pairs was~$\mathcal F_{vv}$,
 such that
 \[
 \begin{split}
 g[v,f(h_g(v))] &\stackrel{\mathcal{F}_{vv}}{\rightarrow} G[v,F(h_G(v))], \\
 G[v,f(h_G(v))] &\stackrel{\mathcal{F}_{vv}}{\rightarrow} g[v,F(h_g(v))].
 \end{split}
 \]
 Refer to \S~\ref{fouri:100.30}.
}
\[
 \phi([\cdot]) \fouripair \Phi([\cdot]).
\]
Suppose some particular function $f([\cdot])$ whose Fourier transform
on~$[\cdot]$ is $F([\cdot])$, for the two of which there must exist---by
direct duality thrice on~$[\cdot]$---the Fourier pair
\[
 F([\cdot]) \fouripair f([\cdot]).
\]
Let us define
\[
 \Phi([\cdot]) \equiv f([\cdot]),
\]
whose inverse Fourier transform on~$[\cdot]$, in view of the foregoing,
cannot but be
\[
 \phi([\cdot]) \equiv F([\cdot]);
\]
then observe that substituting these two, complementary definitions
together into the formal pair~(\ref{fouri:110:15}) yields
(\ref{fouri:110:14})'s second line, completing the proof.

Once the proof is understood,~(\ref{fouri:110:14}) is readily
extended to
\bq{fouri:110:16}
 \begin{split}
 g[v,f_1(h_{g1}(v)),f_2(h_{g2}(v))] &\fouripair G[v,F_1(h_{G1}(v)),F_2(h_{G2}(v))], \\
 G[v,f_1(h_{G1}(v)),f_2(h_{G2}(v))] &\fouripair g[v,F_1(h_{g1}(v)),F_2(h_{g2}(v))];
 \end{split}
\eq
and indeed generalized to
%\footnote{
% The notation of~(\ref{fouri:110:17}) is more abstract than one would
% like but once you grasp the semantics (which admittedly is not trivial
% to do) you will see the necessity of the abstraction.
%}
\bq{fouri:110:17}
 \begin{split}
 g[v,f_k(h_{gk}(v))] &\fouripair G[v,F_k(h_{Gk}(v))], \\
 G[v,f_k(h_{Gk}(v))] &\fouripair g[v,F_k(h_{gk}(v))],
 \end{split}
\eq
in which $g[v,f_k(h_{gk}(v))]$ means
$g[v,f_1(h_{g1}(v)),f_2(h_{g2}(v)),f_3(h_{g3}(v)),\ldots]$.

Table~\ref{fouri:110:tbl10} summarizes.
\begin{table}
 \caption[Fourier duality rules.]{Fourier duality rules. (Observe that
 the compositional rules, the table's several rules involving~$g$,
 transform only properties valid for all $f[v]$.)}
 \label{fouri:110:tbl10}
 \bqb
 f(v) &\fouripair& F(v)
 \\
 F(v) &\fouripair& f(v)
 \\
 f(v) &\fouripair& F(v)
 \\
 F(v) &\fouripair& f(v)
 \\&&\\
 g[v,f(h_g(v))] &\fouripair& G[v,F(h_G(v))]
 \\
 G[v,f(h_G(v))] &\fouripair& g[v,F(h_g(v))]
 \\&&\\
 g[v,f_1(h_{g1}(v)),f_2(h_{g2}(v))] &\fouripair& G[v,F_1(h_{G1}(v)),F_2(h_{G2}(v))]
 \\
 G[v,f_1(h_{G1}(v)),f_2(h_{G2}(v))] &\fouripair& g[v,F_1(h_{g1}(v)),F_2(h_{g2}(v))]
 \\&&\\
 g[v,f_k(h_{gk}(v))] &\fouripair& G[v,F_k(h_{Gk}(v))]
 \\
 G[v,f_k(h_{Gk}(v))] &\fouripair& g[v,F_k(h_{gk}(v))]
 \eqb
\end{table}

\subsection{Real and imaginary parts}
+\subsection{Symmetries of the real and imaginary parts}
\label{fouri:110.15}
+In Fig.~\ref{fouri:100:figt}, page~\pageref{fouri:100:figt}, each of the
+real and imaginary parts of the Fourier transform is
+symmetrical (or at least each \emph{looks} symmetrical), though the
+imaginary symmetry differs from the real. This subsection
+analytically develops the symmetries.
+
+\index{dummy variable}
+\index{conjugate!Fourier transform of}
+\index{conjugate!Fourier transform of}
The Fourier transform of a function's conjugate according
to~(\ref{fouri:byu}) is
\[
@@ 626,33 +452,53 @@ to~(\ref{fouri:byu}) is
f^{*}(\theta)\, d\theta
= \left[ \frac{1}{\sqrt{2\pi}} \int_{-\infty}^\infty e^{iv^{*}\theta} f(\theta)\, d\theta\right]^{*},
\]
in which we have taken advantage of the fact that the dummy
+in which we have taken advantage of the fact that the integrand's dummy
variable $\theta = \theta^{*}$ happens to be real.
This implies by~(\ref{fouri:byu}) and~(\ref{fouri:110:12}) that
+On the other hand, within the paragraph's first equation just above,
+\[
+ \frac{1}{\sqrt{2\pi}} \int_{-\infty}^\infty e^{iv^{*}\theta}
+ f(\theta)\, d\theta =
+ \frac{1}{\sqrt{2\pi}} \int_{-\infty}^\infty e^{-i(-v^{*})\theta}
+ f(\theta)\, d\theta,
+\]
+whereas according to~(\ref{fouri:byu}),
+\bqb
+ \frac{1}{\sqrt{2\pi}} \int_{-\infty}^\infty e^{iv^{*}\theta}
+ f(\theta)\, d\theta &=& \mathcal{F}^{-1}\{f(v^{*})\},\\
+ \frac{1}{\sqrt{2\pi}} \int_{-\infty}^\infty e^{-i(-v^{*})\theta}
+ f(\theta)\, d\theta &=& \mathcal{F}\{f(-v^{*})\};
+\eqb
+so the paragraph's first equation has that
\bq{fouri:110:18}
\mathcal{F}\{f^{*}(v)\}
= \mathcal{F}^{*}\{f(v^{*})\} = \mathcal{F}^{*}\{f(v^{*})\},
\eq
where the symbology $\mathcal{F}^{*}\{\cdot\} \equiv
[\mathcal{F}^{-1}\{\cdot\}]^{*}$ is used and
$
 \mathcal{F}^{-1}\{g(w)\}
 =
 % bad break
 \linebreak
 \mathcal{F}\{\mathcal{F}^{-1}\{\mathcal{F}^{-1}\{g(w)\}\}\}
 = \mathcal{F}\{g(-w)\}
$.
In the arrow notation, it implies that%
+in which the symbology $\mathcal{F}^{*}\{\cdot\} \equiv
+[\mathcal{F}^{-1}\{\cdot\}]^{*}$ is used.
+% Upon review, I am unsure what the rest of the sentence was for. It
+% is commented out for now.
+%and
+%$
+% \mathcal{F}^{-1}\{g(w)\}
+% =
+% \linebreak % bad break
+% \mathcal{F}\{\mathcal{F}^{-1}\{\mathcal{F}^{-1}\{g(w)\}\}\}
+% = \mathcal{F}\{g(-w)\}
+%$.
+In the arrow notation, since
+$F(\cdot) = \mathcal{F}\{f(\cdot)\}$,
+and thus $F^{*}(\cdot) = \mathcal{F}^{*}\{f(\cdot)\}$,
+even for $(\cdot) = (v^{*})$, it implies that%
\footnote{
From past experience with complex conjugation, an applied
mathematician might naturally have expected of~(\ref{fouri:110:18a})
 that $f^{*}(v) \fouripair F^{*}(v)$, but this natural expectation
 would actually have been incorrect. Readers whom
 this troubles might consider that, unlike most of the book's
 mathematics before Ch.~\ref{fours}, eqns.~(\ref{fours:100:10})
+ that $f^{*}(v) \fouripair F^{*}(v)$, but the expectation though
+ natural would have been incorrect.
+ Unlike most of the book's
+ mathematics before chapter~\ref{fours}, eqns.~(\ref{fours:100:10})
and~(\ref{fours:100:15})---and thus ultimately also the Fourier
 transform's definition~(\ref{fouri:byu})---have arbitrarily chosen a
+ transform's definition~(\ref{fouri:xform})
+ or~(\ref{fouri:byu})---have arbitrarily chosen a
particular sign for the~$i$ in the phasing factor
$e^{ij\,\Delta\omega\,\tau}$ or $e^{iv\theta}$, which phasing factor
the Fourier integration bakes into the transformed function $F(v)$, so
@@ 665,7 +511,10 @@ In the arrow notation, it implies that%
and~\ref{fouri:100:figt}.
}
\bq{fouri:110:18a}
 f^{*}(v) \fouripair F^{*}(-v^{*}).
+ \begin{split}
+ f^{*}(v) &\fouripair F^{*}(-v^{*});\\
+ f^{*}(t) &\fouripair F^{*}(-\omega^{*}).
+ \end{split}
\eq
If we express the real and imaginary parts of $f(v)$ in the style
@@ 680,7 +529,7 @@ then the Fourier transforms of these par
to~(\ref{fouri:110:18a}) are%
\footnote{
The precisely orderly reader might note that a forward reference
 to Table~\ref{fouri:110:tbl20} is here implied, but the property
+ to Table~\ref{fouri:110:tbl20} is here implied; but the property
referred to, Fourier superposition $A_1f_1(v) + A_2f_2(v) \fouripair
A_1F_1(v) + A_2F_2(v)$, which does not depend on this subsection's
results anyway, is so trivial to prove that we will not bother about
@@ 692,64 +541,96 @@ to~(\ref{fouri:110:18a}) are%
\Im[f(v)] &\fouripair \frac{F(v) - F^{*}(-v^{*})}{i2}.
\end{split}
\eq
For real~$v$ and an $f(v)$ which itself is real for all real~$v$, the
+For real~$v$ and an $f(v)$ which is itself real for all real~$v$, the
latter line becomes
\[
 0 \fouripair \frac{F(v) - F^{*}(-v)}{i2}
\ \ \mbox{if $\Im(v) = 0$ and, for all such~$v$, $\Im[f(v)] = 0$,}
\]
whereby
+whereby\footnote{%
+ A professional mathematician might object that we had
+ never established a onetoone correspondence between a function and
+ its transform. On the other hand, recognizing the applied spirit of
+ the present work, the professional might waive the objection---if not
+ with pleasure, then at least with a practical degree of
+ indulgence---but see also \S~\ref{fours:100.80}.%
+}
\bq{fouri:110:19b}
 F(v) = F^{*}(-v)
\ \ \mbox{if $\Im(v) = 0$ and, for all such~$v$, $\Im[f(v)] = 0$.}
\eq
Interpreted,~(\ref{fouri:110:19b}) says for real~$v$ and $f(v)$ that the
plot of $\Re[F(v)]$ is symmetric about the vertical axis whereas
the plot of $\Im[F(v)]$ is symmetric about the origin, as
+plot of $\Re[F(v)]$ must be symmetric about the vertical axis whereas
+the plot of $\Im[F(v)]$ must be symmetric about the origin, as
Fig.~\ref{fouri:100:figt} has illustrated.
Table~\ref{fouri:110:tbl15} summarizes.
\begin{table}
 \caption{Real and imaginary parts of the Fourier transform.}
 \label{fouri:110:tbl15}
 \bqb
 f^{*}(v) &\fouripair& F^{*}(-v^{*}) \\
 \Re[f(v)] &\fouripair& \frac{F(v) + F^{*}(-v^{*})}{2} \\
 \Im[f(v)] &\fouripair& \frac{F(v) - F^{*}(-v^{*})}{i2}
 \eqb
 \bc
 If $\Im(v) = 0$ and, for all such~$v$, $\Im[f(v)] = 0$, then
 \ec
 \[
 F(v) = F^{*}(-v).
 \]
\end{table}
+\subsection{Duality}
+\label{fouri:110.10}
+\index{duality}
+\index{Fourier transform!dual of}
%Before continuing to the next subsection to study further Fourier
%properties we should caution thatthough the properties of
%Table~\ref{fouri:110:tbl15} are indeed validthe meaning and, indeed,
%the proper existence of the Fourier transform are not necessarily
%obvious when~$v$ is complex. Subtleties can arise, depending on
%the function being transformed. For instance, after reading
%\S\S~\ref{fouri:110.20} and~\ref{fouri:110.30} below, one might consider
%the Fourier transform of $f(u) = e^{iav}$ for complex~$v$. Then, after
%reading
%[section not yet written]%
%,
%one might consider representing the Dirac delta and its transform as the
%limit of a Gaussian pulse. None of this is easy. The peculiar form
%of~(\ref{fouri:110:18a}) is a sign that Fourier mathematics give
%complex arguments an aspect not seen earlier in the book. The Fourier
%transform most often arises in applications with real~$v$ for this
%reason among others.
%
%Section
%[not yet written]
%will introduce the Laplace transform, a variant of the Fourier transform
%meant especially, systematically to handle complex~$v$. Students new to
%the Fourier transform, if confused by the complex case, might limit
%their consideration of the topic to the real case at least until after
%learning Laplace.
+\index{Fourier transform!reversing the independent variable of}
+Changing $v \la -v$ makes (\ref{fouri:byu})'s second line to read
+\[
+ f(-v) =
+ \frac{1}{\sqrt{2\pi}}
+ \int_{-\infty}^{\infty}
+ e^{-iv\theta}
+ F(\theta)
+ \,d\theta.
+\]
+However, according to (\ref{fouri:byu})'s
+% or (\ref{fouri:byu2})'s?
+first line, this says neither more nor less than that
+\bq{fouri:110:10}
+ \begin{split}
+ F(v) &\fouripair f(-v),\\
+ F(t) &\fouripair f(-\omega),
+ \end{split}
+\eq
+which is that the transform of the transform is the original function
+with the independent variable reversed, an interesting and useful
+property. It is entertaining,
+and moreover enlightening, to combine~(\ref{fouri:100:30})
+and~(\ref{fouri:110:10}) to form the endless transform progression
+\bq{fouri:110:12}
+ \cdots
+ \fouripair f(v)
+ \fouripair F(v)
+ \fouripair f(-v)
+ \fouripair F(-v)
+ \fouripair f(v)
+ \fouripair \cdots
+\eq
+Equation~(\ref{fouri:110:10}) or alternately~(\ref{fouri:110:12})
+expresses the Fourier transform's property of \emph{duality.}
+
+For an example of duality, recall that \S~\ref{fouri:100.40} has
+computed the transform pairs
+\[
+ \begin{split}
+ \Pi(v) &\fouripair \frac{\sinarg(v/2)}{\sqrt{2\pi}}, \\
+ \Lambda(v) &\fouripair \frac{\sinarg^2(v/2)}{\sqrt{2\pi}}, \\
+ \Psi(v) &\fouripair \frac{\sinarg v}{\sqrt{2\pi}\left[1(v/\pi)^2\right]}.
+ \end{split}
+\]
+Application of~(\ref{fouri:110:10}) yields the additional, dual
+transform pairs
+\bq{fouri:110:15}
+ \begin{split}
+ \frac{\sinarg(v/2)}{\sqrt{2\pi}} &\fouripair \Pi(v), \\
+ \frac{\sinarg^2(v/2)}{\sqrt{2\pi}} &\fouripair \Lambda(v), \\
+ \frac{\sinarg v}{\sqrt{2\pi}\left[1(v/\pi)^2\right]} &\fouripair \Psi(v),
+ \end{split}
+\eq
+in which that $\Pi(-v)=\Pi(v)$, $\Lambda(-v)=\Lambda(v)$ and
+$\Psi(-v)=\Psi(v)$ are
+observed (but eqn.~\ref{fouri:110:10} works as well on pulses that lack
+such symmetry). Without duality, to use~(\ref{fouri:byu}) to compute
+the transform pairs of~(\ref{fouri:110:15}) might have been hard, but
+with duality it's pretty easy, as you see.
+(Section~\ref{fouri:120} will further improve eqn.~\ref{fouri:110:15},
+incidentally.)
\subsection{The Fourier transform of the Dirac delta}
\label{fouri:110.20}
@@ 760,12 +641,14 @@ Table~\ref{fouri:110:tbl15} summarizes.
\index{constant!Fourier transform of}
\index{Fourier transform!of a constant}
\index{$1$ (one)!Fourier transform of}
+\index{Dirac delta function!sifting property of}
+\index{delta function, Dirac!sifting property of}
Section~\ref{fouri:120}
will compute several Fourier transform pairs but \S~\ref{fouri:110.30}
will need one particular pair and its dual sooner, so let us pause to
compute these now. Applying~(\ref{fouri:byu}) to the Dirac
delta~(\ref{integ:670:20}) and invoking its sifting
property~(\ref{integ:670:sift}), we find curiously that
+will compute several Fourier transform pairs but one particular pair
+is so significant, so illustrative, so curious, and so easy to compute
+that we will pause to compute it and its dual now.
+Applying~(\ref{fouri:byu}) to the Dirac delta~(\ref{integ:670:20}) and
+invoking its sifting property~(\ref{integ:670:sift}), we find that
\bq{fouri:110:20}
\delta(v) \fouripair \frac{1}{\sqrt{2\pi}},
\eq
@@ 775,89 +658,273 @@ the dual of which according to~(\ref{fou
\left(\sqrt{2\pi}\right) \delta(v)
\eq
inasmuch as $\delta(-v) = \delta(v)$.
(The duality rule proves its worth in eqn.~\ref{fouri:110:22},
incidentally. Had
we tried to calculate the Fourier transform of~$1$that is, of
$f[v]\equiv 1$directly according to eqn.~\ref{fouri:byu} we would
have found
+
+The duality rule again incidentally proves its worth in
+eqn.~\ref{fouri:110:22}. Had we tried to calculate the Fourier
+transform of~$1$---that is, of $f(v)\equiv 1$---directly according to
+eqn.~\ref{fouri:byu} we would have found the pair
$
 1 \fouripair [1/\sqrt{2\pi}]
+ 1 \fouripair
+\linebreak % bad break
+ (1/\sqrt{2\pi})
+	\int_{-\infty}^{\infty} e^{-iv\theta} \,d\theta,
$
an impossible integral to evaluate.)
+the right side of which features an integral impossible to evaluate. A
+limit of some kind might perhaps have been enforced to circumvent the
+impossibility, but as you see again, duality is easier.
+
+\subsection{Delay and shift}
+\label{fouri:110.25}
+\index{Fourier transform!of a delayed function}
+\index{Fourier transform!frequencyshifted}
+\index{delay}
+\index{frequency shift}
+\index{shift!in frequency}
+
+Applying~(\ref{fouri:byu}) to~$f(v-a)$ and changing $\xi \la \theta - a$,
+we have the transform property of \emph{delay}:
+\bq{fouri:110:25}
+	f(v-a) \fouripair e^{-iav}F(v).
+\eq
+Applying~(\ref{fouri:byu}) to~$e^{iav}f(v)$, we have the transform
+property of \emph{frequency shift}:
+\bq{fouri:110:26}
+	e^{iav}f(v) \fouripair F(v-a).
+\eq
+
+\subsection{Metaduality}
+\label{fouri:110.27}
+\index{duality}
+\index{metaduality}
+\index{Fourier transform!dual of}
+\index{Fourier transform!metadual of}
+
+Section~\ref{fouri:110.10} has shown how to compose the dual of a
+Fourier transform pair. One can likewise compose the dual of a
+Fourier transform \emph{property,} but to do so correctly wants careful
+handling.
+
+Defining\footnote{%
+ The Roman~$w$ of this subsection is not the Greek~$\omega$ of
+ \S~\ref{fouri:100.10}.%
+}%
+\[
+ \begin{split}
+ F(w) &\equiv \mathcal F \{f(u)\}, \\
+ \Phi(u) &\equiv \mathcal F \{\phi(w)\}, \\
+ \phi(w) &\equiv F(w),
+ \end{split}
+\]
+one can write~(\ref{fouri:110:12}) as
+\bq{fouri:110:27f}
+ \br{ccccccccc}
+ \cdots\ \fouripair &
+ \Phi(u) & \fouripair &
+ \phi(w) & \fouripair &
+ \Phi(u) & \fouripair &
+ \phi(w) & \fouripair \ \cdots
+ \\
+ \cdots\ \fouripair &
+ f(u) & \fouripair &
+ F(w) & \fouripair &
+ f(u) & \fouripair &
+ F(w) & \fouripair \ \cdots
+ \er
+\eq
+in which $\phi(w)=F(w)$ are vertically aligned, $\Phi(u)=f(u)$ are
+vertically aligned, and so on.
+Similarly,
+\bq{fouri:110:27g}
+ \br{ccccccccc}
+ \cdots\ \fouripair &
+ \Gamma(v) & \fouripair &
+ \gamma(v) & \fouripair &
+ \Gamma(v) & \fouripair &
+ \gamma(v) & \fouripair \ \cdots
+ \\
+ \cdots\ \fouripair &
+ g(v) & \fouripair &
+ G(v) & \fouripair &
+ g(v) & \fouripair &
+ G(v) & \fouripair \ \cdots
+ \er
+\eq
+These are just to repeat~(\ref{fouri:110:12}) in various symbols $\phi
+\la F$ and $\gamma \la G$, so they say nothing new, but the variant
+symbology helps for example as follows.
\subsection{Shifting, scaling and differentiation}
+Let the delay property~(\ref{fouri:110:25}) be styled as
+\[
+ \gamma(v) \fouripair \Gamma(v),
+\]
+where
+\[
+ \begin{split}
+ \gamma(v) &\equiv \phi(va), \\
+ \Gamma(v) &= e^{iav}\Phi(v), \\
+ \gamma(v) &= \phi(va), \\
+ \Gamma(v) &= e^{iav}\Phi(v), \\
+ w &\equiv va, \\
+ u &\equiv v.
+ \end{split}
+\]
+[One might be tempted to write $\gamma(v) = \phi(v+a)$ just above
+because $w=v+a$, but this would be a mistake; for though indeed
+$\gamma(v) = \phi(w)$, nothing has said that $\gamma(v) = \phi(w)$.
+No, one must evaluate $\gamma(v)$ consistently with the
+definition that $\gamma(v) \equiv \phi(va)$, substituting $v \la v$.]
+Changing symbols downward a line with respect to each of
+(\ref{fouri:110:27f}) and~(\ref{fouri:110:27g}) yields
+\[
+ G(v) \fouripair g(v),
+\]
+where
+\[
+ \begin{split}
+ G(v) &= F(va), \\
+ g(v) &= e^{iav}f(v), \\
+ G(v) &= F(va), \\
+ g(v) &= e^{iav}f(v).
+ \end{split}
+\]
+Since $g(v) \fouripair G(v)$, these mean that
+\[
+	e^{iav}f(v) \fouripair F(v-a),
+\]
+which is~(\ref{fouri:110:26}). Apparently, the frequencyshifting
+property is the dual of the delay property.
+
+Besides~(\ref{fouri:110:27f}) and~(\ref{fouri:110:27g}), the reverse
+skew is just as possible:
+\bq{fouri:110:28f}
+ \br{ccccccccc}
+ \cdots\ \fouripair &
+ \phi(w) & \fouripair &
+ \Phi(u) & \fouripair &
+ \phi(w) & \fouripair &
+ \Phi(u) & \fouripair \ \cdots
+ \\
+ \cdots\ \fouripair &
+ F(w) & \fouripair &
+ f(u) & \fouripair &
+ F(w) & \fouripair &
+ f(u) & \fouripair \ \cdots
+ \er
+\eq
+and
+\bq{fouri:110:28g}
+ \br{ccccccccc}
+ \cdots\ \fouripair &
+ \gamma(v) & \fouripair &
+ \Gamma(v) & \fouripair &
+ \gamma(v) & \fouripair &
+ \Gamma(v) & \fouripair \ \cdots
+ \\
+ \cdots\ \fouripair &
+ G(v) & \fouripair &
+ g(v) & \fouripair &
+ G(v) & \fouripair &
+ g(v) & \fouripair \ \cdots
+ \er
+\eq
+though the writer seems (for whatever reason) to have
+found the forward
+skew of~(\ref{fouri:110:27f}) and~(\ref{fouri:110:27g}) to be the
+more convenient. Either way, a lot of letters are used$\phi\Phi f
+F(wu)$ and $\gamma \Gamma g G(v)$ [and you can use yet more
+letters like~$\chi \mathrm X h H(w_\chi u_\chi)$ if you have an extra
+function to transform as, for instance, while deriving
+eqn.~\ref{fouri:110:43}]but the letters serve to keep the various
+relations straight and, anyway, you don't need so many letters to
+compute the dual~(\ref{fouri:110:10}) of an ordinary transform pair but
+only to compute the dual (or, if you prefer, \emph{metadual}) of a
+transform \emph{property.}
+
+\subsection{Summary of properties}
\label{fouri:110.30}
\index{Fourier transform!shifting of}
\index{Fourier transform!of a shifted function}
\index{Fourier transform!scaling of}
\index{Fourier transform!differentiation of}
\index{Fourier transform!of a derivative}
\index{Fourier transform!linearity of}
\index{derivative!of a Fourier transform}
\index{derivative!Fourier transform of}
Table~\ref{fouri:110:tbl20} lists several Fourier properties involving
shifting, scaling and differentiation, plus an expression of the Fourier
transform's linearity.
+Table~\ref{fouri:110:tbl20} summarizes properties of the Fourier
+transform.
\begin{table}
 \caption[Properties involving shifting, scaling and
 differentiation.]{Fourier properties involving shifting, scaling,
 differentiation and integration.}
+ \caption{Properties of the Fourier transform.}
\label{fouri:110:tbl20}
\bqb
+	F(-v) &=& F^{*}(v)
+	\ \ \mbox{if $\Im(v) = 0$ and,} \\&&\quad
+	\mbox{for all such~$v$, $\Im[f(v)] = 0$.}\\
f(v-a) &\fouripair& e^{-iav}F(v) \\
e^{iav}f(v) &\fouripair& F(v-a) \\
Af(\alpha v) &\fouripair& \frac{A}{\left|\alpha\right|}F\left(\frac{v}{\alpha}\right)
\ \ \mbox{if $\Im(\alpha)=0$, $\Re(\alpha)\neq 0$} \\
A_1f_1(v) + A_2f_2(v) &\fouripair& A_1F_1(v) + A_2F_2(v) \\
\frac{d}{dv}f(v) &\fouripair& ivF(v) \\
 ivf(v) &\fouripair& \frac{d}{dv}F(v) \\
+	-ivf(v) &\fouripair& \frac{d}{dv}F(v) \\
+	\frac{d^n}{dv^n}f(v) &\fouripair& (iv)^nF(v) \\
+	(-iv)^nf(v) &\fouripair& \frac{d^n}{dv^n}F(v) \\
+ &&\quad n\in\mathbb Z,\ n\ge 0\\
\int_{-\infty}^v f(\tau) \,d\tau &\fouripair&
 \frac{F(v)}{iv} + \frac{2\pi}{2} F(0) \delta(v)
+ \frac{F(v)}{iv} + \pi F(0) \delta(v)
\eqb
\end{table}
The table's first property is proved by applying~(\ref{fouri:byu}) to
$f(va)$ then changing $\xi \la \theta  a$. The table's second
property is proved by applying~(\ref{fouri:byu}) to $e^{iav}f(v)$; or,
alternately, is proved through~(\ref{fouri:110:14}) as the composition
dual of the table's first property. The table's third property is
proved by applying~(\ref{fouri:byu}) to $Af(\alpha v)$ then changing
$\xi\la\alpha\theta$. The table's fourth property is proved trivially.

The table's fifth and sixth properties begin from the derivative of the
inverse Fourier transform; that is, of (\ref{fouri:byu})'s second line.
This derivative is
\bqb
 \frac{d}{dv}f(v) &=&
 \frac{1}{\sqrt{2\pi}}
 \int_{\infty}^{\infty}
 i\theta
 e^{iv\theta}
 F(\theta)
 \,d\theta
 \\&=&
+\index{scaling property of the Fourier transform}
+\index{Fourier transform!scaling of}
+The table's first three properties have been proved earlier in this
+section. The table's fourth property, that
+\bq{fouri:110:33}
+	Af(\alpha v) \fouripair \frac{A}{\left|\alpha\right|}F\left(\frac{v}{\alpha}\right)
+	\ \ \mbox{if $\Im(\alpha)=0$, $\Re(\alpha)\neq 0$},
+\eq
+which is the \emph{scaling property} of the Fourier transform, is
+proved by applying~(\ref{fouri:byu}) to $Af(\alpha v)$ and then changing
+$\xi\la\alpha\theta$ (the magnitude sign~$|\cdot|$ coming
+because~$\alpha$, if negative, reverses Fourier's infinite limits of
+integration in eqn.~\ref{fouri:byu}; see \S~\ref{inttx:reversal}).
+The table's fifth property is proved trivially.
+
+The table's sixth through ninth properties begin from the derivative of
+the inverse Fourier transform; that is, of (\ref{fouri:byu})'s second
+line. This derivative is
+\[
+ \frac{d}{dv}f(v) =
\frac{1}{\sqrt{2\pi}}
\int_{\infty}^{\infty}
e^{iv\theta}
[i\theta F(\theta)]
\,d\theta
 \\&=& \mathcal{F}^{1}\{ivF(v)\},
\eqb
which implies
+ = \mathcal{F}^{1}\{ivF(v)\},
+\]
+which implies that
\[
\mathcal{F}\left\{\frac{d}{dv}f(v) \right\} = ivF(v),
\]
the table's fifth property. The sixth and last property is the
compositional dual~(\ref{fouri:110:14}) of the fifth.

Besides the identities this section derives, Table~\ref{fouri:110:tbl20}
also includes
% bad break
\linebreak
(\ref{fouri:125:10}), which \S~\ref{fouri:125} will prove.

+the table's sixth property. The metadual (\S~\ref{fouri:110.27}) of the
+sixth property is the table's seventh property, during the
+computation of which one observes that,
+\bq{fouri:110:35}
+	\mbox{if}\ \gamma(v) = \frac{d}{dv}\phi(v),\ \mbox{then}\
+	\gamma(-v) = -\frac{d}{dv}\phi(-v),
+\eq
+a fact whose truth can be demonstrated via eqn.~\ref{drvtv:defz}'s
+definition of the derivative or, easier, can be seen by sketching on a
+sheet of paper some arbitrary, asymmetric function (like, say, $\phi[v]
+\equiv e^{v/3}$) and a visual approximation to its derivative. The
+table's eighth and ninth properties come by repeated application of the
+sixth and seventh.
+
+The table's tenth and last property is (\ref{fouri:125:10}).
+Section~\ref{fouri:125} will derive it.
+
+% diagn: the last time the following subsection was checked, errors
+% were found and corrected; therefore, the subsection wants checking
+% again sometime
\subsection{Convolution and correlation}
\label{fouri:110.40}
\index{convolution}
@@ 870,17 +937,22 @@ also includes
\index{Fourier transform!of a product}
\index{transfer function}
The concept of \emph{convolution} emerges from mechanical engineering
(or from its subdisciplines electrical and chemical engineering), in
which the response of a linear system to an impulse $\delta(t)$ is some
characteristic \emph{transfer function} $h(t)$. Since the system is
linear, it follows that its response to an arbitrary input $f(t)$ is
\[
 g(t) \equiv \int_{\infty}^{\infty} h(t\tau)f(\tau) \,d\tau;
\]
+\index{Dirac delta function}
+\index{delta function, Dirac}
+In mechanical and electrical engineering, the concept of
+\emph{convolution} emerges during the analysis of a linear system whose
+response to a Dirac impulse $\delta(t)$ is
+some characteristic \emph{transfer function} $h(t)$. To explore the
+mechanical origin and engineering application of the transfer function
+would exceed the book's scope; but, inasmuch as
+a system is linear and its response to a Dirac impulse $\delta(t)$ is
+indeed $h(t)$, its response to an arbitrary input $f(t)$ cannot but be
+\bq{fouri:110:40a}
+ g_1(t) \equiv \int_{\infty}^{\infty} h(t\tau)f(\tau) \,d\tau;
+\eq
or, changing $t/2+\tau \la \tau$ to improve the equation's symmetry,
\bq{fouri:110:40}
 g(t) \equiv \int_{\infty}^{\infty} h\left(\frac{t}{2}\tau\right)
+ g_1(t) \equiv \int_{\infty}^{\infty} h\left(\frac{t}{2}\tau\right)
f\left(\frac{t}{2}+\tau\right) \,d\tau.
\eq
This integral defines%
@@ 888,10 +960,10 @@ This integral defines%
convolution of the two functions $f(t)$ and $h(t)$.
Changing $v\la t$ and $\psi\la\tau$ in~(\ref{fouri:110:40}) to comport
with the notation found elsewhere in this section then
applying~(\ref{fouri:byu}) yields%
\footnote{
 See Ch.~\ref{fours}'s footnote~\ref{fours:100:fn10}.
+with the notation found elsewhere in this section and then
+applying~(\ref{fouri:byu}) yields that%
+\footnote{%
+ See \S~\ref{fours:100.10}.%
}
\bqb
\lefteqn{
@@ 919,7 +991,7 @@ applying~(\ref{fouri:byu}) yields%
\,d\theta
\,d\psi.
\eqb
Now changing $\phi \la \theta/2 + \psi$,
+Now changing $\phi \la \theta/2 + \psi$ within the inner integral,
\bqb
\lefteqn{
\mathcal{F}\left\{
@@ 945,7 +1017,7 @@ Now changing $\phi \la \theta/2 + \psi$,
\,d\psi
\,d\phi.
\eqb
Again changing $\chi \la \phi  2\psi$,
+Again changing $\mu \la \phi  2\psi$ within the inner integral,
\bqb
\lefteqn{
\mathcal{F}\left\{
@@ 959,14 +1031,14 @@ Again changing $\chi \la \phi  2\psi$,
\int_{\infty}^{\infty}
e^{iv\phi} f(\phi)
\int_{\infty}^{\infty}
 e^{iv\chi} h(\chi)
 \,d\chi
+ e^{iv\mu} h(\mu)
+ \,d\mu
\,d\phi
\\&=&
\left[\sqrt{2\pi}\right]
\left[
\frac{1}{\sqrt{2\pi}}
 \int_{\infty}^{\infty} e^{iv\chi} h(\chi) \,d\chi
+ \int_{\infty}^{\infty} e^{iv\mu} h(\mu) \,d\mu
\right]
\left[
\frac{1}{\sqrt{2\pi}}
@@ 983,7 +1055,48 @@ That is,
\,d\psi
\fouripair \left(\sqrt{2\pi}\right) H(v) F(v).
\eq
The compositional dual~(\ref{fouri:110:14}) of~(\ref{fouri:110:42}) is
+
+Symbolizing~(\ref{fouri:110:42}) in the manner of \S~\ref{fouri:110.27}
+as
+\[
+ \gamma(v) \fouripair \Gamma(v),
+\]
+where
+\[
+ \begin{split}
+ \gamma(v) &\equiv \int_{\infty}^{\infty}
+ \chi\left(\frac{v}{2}\psi\right)
+ \phi\left(\frac{v}{2}+\psi\right)\,d\psi,\\
+ \Gamma(v) &= \left(\sqrt{2\pi}\right) X(v) \Phi(v),\\
+ \gamma(v) &= \int_{\infty}^{\infty}
+ \chi\left(\frac{v}{2}\psi\right)
+ \phi\left(\frac{v}{2}+\psi\right)\,d\psi,\\
+ \Gamma(v) &= \left(\sqrt{2\pi}\right) X(v) \Phi(v),\\
+ w_\chi &\equiv \frac{v}{2}  \psi,\\
+ w_\phi &\equiv \frac{v}{2} + \psi,\\
+ u &\equiv v,
+ \end{split}
+\]
+or, after changing symbols downward a line with respect to each
+of~(\ref{fouri:110:27f}) and~(\ref{fouri:110:27g}),
+\[
+ G(v) \fouripair g(v),
+\]
+where
+\[
+ \begin{split}
+ G(v) &= \int_{\infty}^{\infty}
+ H\left(\frac{v}{2}\psi\right)
+ F\left(\frac{v}{2}+\psi\right)\,d\psi,\\
+ g(v) &= \left(\sqrt{2\pi}\right) h(v) f(v),\\
+ G(v) &= \int_{\infty}^{\infty}
+ H\left(\frac{v}{2}\psi\right)
+ F\left(\frac{v}{2}+\psi\right)\,d\psi,\\
+ g(v) &= \left(\sqrt{2\pi}\right) h(v) f(v),
+ \end{split}
+\]
+one finds the metadual $g(v)\fouripair G(v)$ of~(\ref{fouri:110:42})
+to be
\bq{fouri:110:43}
h(v) f(v)
\fouripair
@@ 991,17 +1104,21 @@ The compositional dual~(\ref{fouri:110:1
\int_{\infty}^{\infty}
H\left(\frac{v}{2}\psi\right)
F\left(\frac{v}{2}+\psi\right)
 \,d\psi,
+ \,d\psi.
\eq
in which we have changed $\psi\la\psi$ as the dummy variable of
integration. Whether by~(\ref{fouri:110:42}) or
by~(\ref{fouri:110:43}), convolution in the one domain evidently
transforms to multiplication in the other.
+Whether by~(\ref{fouri:110:42}) or by~(\ref{fouri:110:43}), convolution
+in the one domain evidently transforms to multiplication in the other.
+
+\newboolean{showFouriCorrelation}
+\setboolean{showFouriCorrelation}{false}
+\index{argument!real}
+\index{real argument}
+\index{Fourier transform!of a function whose argument is complex}
Closely related to the convolutional integral~(\ref{fouri:110:40}) is
the integral
\bq{fouri:110:45}
 g(t) \equiv \int_{\infty}^{\infty} h\left(\tau\frac{t}{2}\right)
+ g_2(t) \equiv \int_{\infty}^{\infty} h\left(\tau\frac{t}{2}\right)
f\left(\tau+\frac{t}{2}\right) \,d\tau,
\eq
whose transform and dual transform are computed as in the last paragraph
@@ 1023,8 +1140,176 @@ to be
\,d\psi.
\end{split}
\eq
Furthermore, according to~(\ref{fouri:110:18a}), $h^{*}(v) \fouripair
H^{*}(v^{*})$, so
+\ifthenelse{\boolean{showFouriCorrelation}}{%
+ Details:
+ \[
+ g(t) \equiv \int_{\infty}^{\infty} h\left(\tau\frac{t}{2}\right)
+ f\left(\tau+\frac{t}{2}\right) \,d\tau.
+ \]
+ Changing $v\la t$ and $\psi\la\tau$ and applying~(\ref{fouri:byu})
+ yields that
+ \bqb
+ \lefteqn{
+ \mathcal{F}\left\{
+ \int_{\infty}^{\infty}
+ h\left(\psi\frac{v}{2}\right)
+ f\left(\psi+\frac{v}{2}\right)
+ \,d\psi
+ \right\}
+ }&&\\&=&
+ \frac{1}{\sqrt{2\pi}}
+ \int_{\infty}^{\infty}
+ e^{iv\theta}
+ \int_{\infty}^{\infty}
+ h\left(\psi\frac{\theta}{2}\right)
+ f\left(\psi+\frac{\theta}{2}\right)
+ \,d\psi
+ \,d\theta
+ \\&=&
+ \frac{1}{\sqrt{2\pi}}
+ \int_{\infty}^{\infty} \int_{\infty}^{\infty}
+ e^{iv\theta}
+ h\left(\psi\frac{\theta}{2}\right)
+ f\left(\psi+\frac{\theta}{2}\right)
+ \,d\theta
+ \,d\psi.
+ \eqb
+ Now changing $\phi \la \theta/2 + \psi$ within the inner integral,
+ \bqb
+ \lefteqn{
+ \mathcal{F}\left\{
+ \int_{\infty}^{\infty}
+ h\left(\psi\frac{v}{2}\right)
+ f\left(\psi+\frac{v}{2}\right)
+ \,d\psi
+ \right\}
+ }&&\\&=&
+ \frac{2}{\sqrt{2\pi}}
+ \int_{\infty}^{\infty} \int_{\infty}^{\infty}
+ e^{iv(2\phi2\psi)}
+ h(2\psi\phi)
+ f(\phi)
+ \,d\phi
+ \,d\psi
+ \\&=&
+ \frac{2}{\sqrt{2\pi}}
+ \int_{\infty}^{\infty}
+ e^{iv\phi} f(\phi)
+ \int_{\infty}^{\infty}
+ e^{iv(\phi2\psi)} h(2\psi\phi)
+ \,d\psi
+ \,d\phi.
+ \eqb
+ Again changing $\mu \la \phi  2\psi$ within the inner integral,
+ \bqb
+ \lefteqn{
+ \mathcal{F}\left\{
+ \int_{\infty}^{\infty}
+ h\left(\psi\frac{v}{2}\right)
+ f\left(\psi+\frac{v}{2}\right)
+ \,d\psi
+ \right\}
+ }&&\\&=&
+ \frac{1}{\sqrt{2\pi}}
+ \int_{\infty}^{\infty}
+ e^{iv\phi} f(\phi)
+ \int_{\infty}^{\infty}
+ e^{iv\mu} h(\mu)
+ \,d\mu
+ \,d\phi
+ \\&=&
+ \left[\sqrt{2\pi}\right]
+ \left[
+ \frac{1}{\sqrt{2\pi}}
+ \int_{\infty}^{\infty} e^{iv\mu} h(\mu) \,d\mu
+ \right]
+ \left[
+ \frac{1}{\sqrt{2\pi}}
+ \int_{\infty}^{\infty} e^{iv\phi} f(\phi) \,d\phi
+ \right]
+ \\&=&
+ \left[\sqrt{2\pi}\right]
+ \left[
+ \frac{1}{\sqrt{2\pi}}
+ \int_{\mu=\infty}^{\infty} e^{i(v)(\mu)} h(\mu) \,d(\mu)
+ \right]\\&&\qquad\qquad\times
+ \left[
+ \frac{1}{\sqrt{2\pi}}
+ \int_{\infty}^{\infty} e^{iv\phi} f(\phi) \,d\phi
+ \right]
+ \\&=&
+ \left[\sqrt{2\pi}\right]
+ \left[
+ \frac{1}{\sqrt{2\pi}}
+ \int_{\mu=\infty}^{\infty} e^{i(v)(\mu)} h(\mu) \,d(\mu)
+ \right]\\&&\qquad\qquad\times
+ \left[
+ \frac{1}{\sqrt{2\pi}}
+ \int_{\infty}^{\infty} e^{iv\phi} f(\phi) \,d\phi
+ \right]
+ \\&=&
+ \left(\sqrt{2\pi}\right) H(v) F(v).
+ \eqb
+ That is,
+ \[
+ \int_{\infty}^{\infty}
+ h\left(\psi\frac{v}{2}\right)
+ f\left(\psi+\frac{v}{2}\right)
+ \,d\psi
+ \fouripair \left(\sqrt{2\pi}\right) H(v) F(v).
+ \]
+ Symbolizing in the manner of \S~\ref{fouri:110.27} as
+ \[
+ \gamma(v) \fouripair \Gamma(v),
+ \]
+ where
+ \[
+ \begin{split}
+ \gamma(v) &\equiv \int_{\infty}^{\infty}
+ \chi\left(\psi\frac{v}{2}\right)
+ \phi\left(\psi+\frac{v}{2}\right)\,d\psi,\\
+ \Gamma(v) &= \left(\sqrt{2\pi}\right) X(v) \Phi(v),\\
+ \gamma(v) &= \int_{\infty}^{\infty}
+ \chi\left(\psi+\frac{v}{2}\right)
+ \phi\left(\psi\frac{v}{2}\right)\,d\psi,\\
+ \Gamma(v) &= \left(\sqrt{2\pi}\right) X(v) \Phi(v),\\
+ w_\chi &\equiv \psi  \frac{v}{2},\\
+ w_\phi &\equiv \psi + \frac{v}{2},\\
+ u &\equiv v,
+ \end{split}
+ \]
+ or, after changing symbols downward a line with respect to each
+ of~(\ref{fouri:110:27f}) and~(\ref{fouri:110:27g}),
+ \[
+ G(v) \fouripair g(v),
+ \]
+ where
+ \[
+ \begin{split}
+ G(v) &= \int_{\infty}^{\infty}
+ H\left(\psi\frac{v}{2}\right)
+ F\left(\psi+\frac{v}{2}\right)\,d\psi,\\
+ g(v) &= \left(\sqrt{2\pi}\right) h(v) f(v),\\
+ G(v) &= \int_{\infty}^{\infty}
+ H\left(\psi+\frac{v}{2}\right)
+ F\left(\psi\frac{v}{2}\right)\,d\psi,\\
+ g(v) &= \left(\sqrt{2\pi}\right) h(v) f(v),
+ \end{split}
+ \]
+ one finds the metadual $g(v)\fouripair G(v)$ to be
+ \[
+ h(v) f(v)
+ \fouripair
+ \frac{1}{\sqrt{2\pi}}
+ \int_{\infty}^{\infty}
+ H\left(\psi\frac{v}{2}\right)
+ F\left(\psi+\frac{v}{2}\right)
+ \,d\psi.
+ \]
+ (End of details.)
+}{}
+Furthermore, according to~(\ref{fouri:110:18a}), $h^{*}(t) \fouripair
+H^{*}(\omega^{*})$, so
\bq{fouri:110:48}
\begin{split}
\int_{\infty}^{\infty}
@@ 1033,19 +1318,17 @@ H^{*}(v^{*})$, so
\,d\psi
&\fouripair \left(\sqrt{2\pi}\right) H^{*}(v^{*}) F(v),
\\
 h^{*}(v^{*}) f(v)
+ h^{*}(v) f(v)
&\fouripair
\frac{1}{\sqrt{2\pi}}
\int_{\infty}^{\infty}
 H^{*}\left(\psi\frac{v}{2}\right)
 F\left(\psi+\frac{v}{2}\right)
 \,d\psi,
+ H^{*}\bigg(\frac{v^{*}}{2}\psi\bigg)
+ F\bigg(\frac{v}{2}+\psi\bigg)
+ \,d\psi;
\end{split}
\eq
in which the second line is the compositional dual of the first with, as
before, the dummy variable $\psi\la\psi$ changed; and indeed one can do
the same to the transform~(\ref{fouri:110:42}) of the
convolutional integral, obtaining
+and indeed one can do the same to the transforms~(\ref{fouri:110:42})
+and~(\ref{fouri:110:43}) of the convolutional integral, obtaining
\bq{fouri:110:49}
\begin{split}
\int_{\infty}^{\infty}
@@ 1058,13 +1341,27 @@ convolutional integral, obtaining
&\fouripair
\frac{1}{\sqrt{2\pi}}
\int_{\infty}^{\infty}
 H^{*}\left(\frac{v}{2}\psi\right)
 F\left(\frac{v}{2}+\psi\right)
+ H^{*}\bigg(\psi\frac{v^{*}}{2}\bigg)
+ F\bigg(\psi+\frac{v}{2}\bigg)
\,d\psi.
\end{split}
\eq
+% diagn: new parenthetical note wants review
+(The~$v^{*}$ of eqns.~\ref{fouri:110:48} and~\ref{fouri:110:49} seems
+to imply that the argument~$v$ might be complex. Though the writer has
+encountered applications with complex~$h$ and~$f$, and though
+complex~$H$ and~$F$ are the norm, the writer has never yet met
+an application with complex~$v$. How to interpret the case of
+complex~$v$, or whether such a case is even valid in Fourier work, are
+questions left open to the reader's consideration. It is perhaps
+interesting that H.~F.\ Davis, author of a book on Fourier mathematics,
+does not in his book seem to consider the transform of a function whose
+argument is complex at all.\footnote{\cite[\S~6.7]{HFDavis}}
+Still, it appears that one can consider complex $v\neq v^{*}$ at
+least in a formal sense, as in \S~\ref{fouri:110.15}; yet in
+applications at any rate, normally and maybe always, $v=v^{*}$ will
+be real.)
\index{autocorrelation}
\index{convolution!commutivity of}
\index{convolution!associativity of}
\index{commutivity!of convolution}
@@ 1074,14 +1371,17 @@ known as convolution, the operation the
expresses has no special name as far as the writer is aware. However,
the operation its variant
\bq{fouri:110:45a}
 g(t) \equiv \int_{\infty}^{\infty} h^{*}\left(\tau\frac{t}{2}\right)
+ g_3(t) \equiv \int_{\infty}^{\infty} h^{*}\left(\tau\frac{t}{2}\right)
f\left(\tau+\frac{t}{2}\right) \,d\tau
\eq
+%of~(\ref{fouri:110:48})
expresses does have a name. It is called \emph{correlation,} being a
measure of the degree to which one function tracks another with an
offset in the independent variable. Reviewing this subsection, we see
that in~(\ref{fouri:110:48}) we have already determined the transform
and dual transform of the correlational integral~(\ref{fouri:110:45a}).
+of the correlational integral~(\ref{fouri:110:45a}). Moreover, assuming
+that $\Im(t) = 0$, we see in~(\ref{fouri:110:49})'s second line that
+we have already determined the dual of this transform, as well.
Convolution and correlation arise often enough in applications to enjoy
their own, peculiar notations%
\footnote{\cite[\S~19.4]{JJH}}
@@ 1095,36 +1395,69 @@ for convolution and
R_{fh}(t) \equiv \int_{\infty}^{\infty} h^{*}\left(\tau\frac{t}{2}\right)
f\left(\tau+\frac{t}{2}\right) \,d\tau
\eq
for correlation. Nothing prevents one from correlating a function with
+for correlation.
+
+\index{autocorrelation}
+Nothing prevents one from correlating a function with
itself, incidentally. The \emph{autocorrelation}
\bq{fouri:110:54}
 R_{ff}(t) = \int_{\infty}^{\infty} f^{*}\left(\tau\frac{t}{2}\right)
+ R_{f\!f}(t) = \int_{\infty}^{\infty} f^{*}\left(\tau\frac{t}{2}\right)
f\left(\tau+\frac{t}{2}\right) \,d\tau
\eq
proves useful at times.%
\footnote{\cite[\S~1.6A]{Hsu:comm}}
% diagn: the following new rest of the paragraph and
% the associated new table entries want review.
For convolution, the commutative and associative properties that
+proves useful at times.\footnote{\cite[\S~1.6A]{Hsu:comm}}
+For convolution, commutative and associative properties that
\bq{fouri:110:51}
\begin{split}
f(t) \ast h(t) &= h(t) \ast f(t), \\
f(t) \ast [g(t) \ast h(t)] &= [f(t) \ast g(t)] \ast h(t),
\end{split}
\eq
are useful, too, where the former may be demonstrated by changing $\tau \la
\tau$ in~(\ref{fouri:110:50}) and, through Fourier transformation, both may
be demonstrated as $f(v) \ast [g(v) \ast h(v)]
\fouripair (\sqrt{2\pi})F(v)[(\sqrt{2\pi})G(v)H(v)] =
% bad break
\linebreak
+may be demonstrated, the former by changing $\tau \la
+\tau$ in~(\ref{fouri:110:50}) and the latter
+by Fourier transformation
+as $f(v) \ast [g(v) \ast h(v)]
+\fouripair
+(\sqrt{2\pi})F(v)
+\linebreak\times % bad break: this whole line is deletable if the break is resolved
+[(\sqrt{2\pi})G(v)H(v)] =
(\sqrt{2\pi})[(\sqrt{2\pi})F(v)G(v)]H(v)
\stackrel{\mathcal{F}^{1}}{\ra} [f(v) \ast g(v)] \ast h(v)$
and similarly for the commutative property.
+\invfouripair [f(v) \ast g(v)] \ast h(v)$.
+
+\index{energy spectral density}
+\index{spectral density}
+\index{density!spectral}
+\index{noise}
+\index{signaling}
+\index{electronic signaling}
+In most cases of practical interest in applications,~$v$ is probably
+real even when $H(v)$
+and $F(v)$ are not, so one can use~(\ref{fouri:110:54}) to write a
+transform pair like the first line of~(\ref{fouri:110:48}) in the style of
+\bq{fouri:110:57}
+	R_{f\!f}(t) \fouripair
+	\left(\sqrt{2\pi}\right) \left|F(t)\right|^{2},\ \ \Im(t) = 0.
+\eq
+Electrical engineers call the quantity $\left|F(t)\right|^2$
+on (\ref{fouri:110:57})'s right the \emph{energy spectral density} of
+$f(t)$.\footnote{\cite[\S~1.6B]{Hsu:comm}}
+Equation~(\ref{fouri:110:57}) is significant among other reasons because,
+in electronic signalingespecially where the inevitable imposition of
+random environmental \emph{noise} has degraded the signalit may
+happen that an adequate estimate of $R_{f\!f}(t)$ is immediately available
+while sufficient information regarding $F(\omega)$ is unavailable. When
+this is the case,~(\ref{fouri:110:57}) affords an elegant, indirect way
+to calculate an energy spectral density even if more direct methods
+cannot be invoked.
Tables~\ref{fouri:110:tbl40} and~\ref{fouri:110:tbl41} summarize.
\begin{table}
 \caption{Convolution and correlation, and their Fourier properties.}
+ \caption%
+ [Convolution and correlation, and their Fourier properties.]%
+ {%
+ Convolution and correlation, and their Fourier properties.
+ (Note that, though the table provides for complex~$v$, $v$ is
+ typically real.)%
+ }
\label{fouri:110:tbl40}
\bqb
\int_{\infty}^{\infty}
@@ 1170,9 +1503,9 @@ Tables~\ref{fouri:110:tbl40} and~\ref{fo
&\fouripair&
\frac{1}{\sqrt{2\pi}}
\int_{\infty}^{\infty}
 H^{*}\left(\frac{v}{2}\psi\right)
+ H^{*}\left(\psi\frac{v^{*}}{2}\right)
\\&&\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \mbox{}\times
 F\left(\frac{v}{2}+\psi\right)
+ F\left(\psi+\frac{v}{2}\right)
\,d\psi
\\
\int_{\infty}^{\infty}
@@ 1182,25 +1515,25 @@ Tables~\ref{fouri:110:tbl40} and~\ref{fo
&\fouripair&
\left(\sqrt{2\pi}\right) H^{*}(v^{*}) F(v)
\\
 h^{*}(v^{*}) f(v)
+ h^{*}(v) f(v)
&\fouripair&
\frac{1}{\sqrt{2\pi}}
\int_{\infty}^{\infty}
 H^{*}\left(\psi\frac{v}{2}\right)
+ H^{*}\left(\frac{v^{*}}{2}\psi\right)
\\&&\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \mbox{}\times
 F\left(\psi+\frac{v}{2}\right)
+ F\left(\frac{v}{2}+\psi\right)
\,d\psi
\eqb
\end{table}
\begin{table}
\caption[Convolution and correlation in their peculiar notation.]{%
Convolution and correlation in their peculiar notation.
 % diagn: the next sentence is new and wants review.
(Note that the~$\ast$ which appears in the table as $h[t] \ast f[t]$
 differs in meaning from the~$\mbox{}^{*}$ in $h^{*}[v^{*}]$.)%
+ differs in meaning from the~$\mbox{}^{*}$ in $h^{*}[v]$.)%
}
\label{fouri:110:tbl41}
\bqb
+ \Im(t) &=& 0\\
f(t) \ast h(t) = h(t) \ast f(t)
&\equiv&
\int_{\infty}^{\infty} h\left(\frac{t}{2}\tau\right)
@@ 1211,67 +1544,40 @@ Tables~\ref{fouri:110:tbl40} and~\ref{fo
\int_{\infty}^{\infty} h^{*}\left(\tau\frac{t}{2}\right)
f\left(\tau+\frac{t}{2}\right) \,d\tau
\\
 h(v) \ast f(v)
+ h(t) \ast f(t)
&\fouripair&
 \left(\sqrt{2\pi}\right) H(v) F(v)
+ \left(\sqrt{2\pi}\right) H(t) F(t)
\\
 h(v) f(v)
+ h(t) f(t)
&\fouripair&
 \frac{1}{\sqrt{2\pi}}[H(v) \ast F(v)]
+ \frac{1}{\sqrt{2\pi}}[H(t) \ast F(t)]
\\
 R_{fh}(v)
+ R_{fh}(t)
&\fouripair&
 \left(\sqrt{2\pi}\right) H^{*}(v^{*}) F(v)
+ \left(\sqrt{2\pi}\right) H^{*}(t) F(t)
+ \\
+ h^{*}(t) f(t)
+ &\fouripair&
+ \frac{1}{\sqrt{2\pi}}R_{FH}(t)
\\
 h^{*}(v^{*}) f(v)
+ R_{f\!f}(t)
&\fouripair&
 \frac{1}{\sqrt{2\pi}}R_{FH}(v)
+	\left(\sqrt{2\pi}\right) \left|F(t)\right|^{2}
\\
f(t) \ast [g(t) \ast h(t)] &=& [f(t) \ast g(t)] \ast h(t)
\eqb
\end{table}
\index{energy spectral density}
\index{spectral density}
\index{density!spectral}
Before closing the section, we should take note of one entry
of~\ref{fouri:110:tbl41} in particular, $R_{fh}(v) \fouripair
(\sqrt{2\pi}) H^{*}(v^{*}) F(v)$. This same entry is also found in
Table~\ref{fouri:110:tbl40} in other notationindeed it is just
the first line of~(\ref{fouri:110:48})but when written in the
correlation's peculiar notation it draws attention to a peculiar result.
Scaled by $1/\sqrt{2\pi}$, the \emph{autocorrelation} and its
Fourier transform are evidently
\[
 \frac{1}{\sqrt{2\pi}} R_{ff}(v) \fouripair F^{*}(v^{*}) F(v).
\]
For%
\footnote{
 Electrical engineers call the quantity $\leftF(v)\right^2$ on
 (\ref{fouri:110:57})'s right the \emph{energy spectral density} of
 $f(v)$.~\cite[\S~1.6B]{Hsu:comm}
}
real~$v$,
\bq{fouri:110:57}
 \frac{1}{\sqrt{2\pi}} R_{ff}(v) \fouripair \leftF(v)\right^2
 \ \ \mbox{if $\Im(v) = 0$.}
\eq

%Section \S~\ref{fouri:110.55} will comment on the notion of energy in
%signals, but for the moment we should like to observe that, in practical
%electrical control systems, it sometimes happens that an adequate
%estimate of $R_{ff}(v)$ is immediately available whereas that the
%history of $f(v)$ is not. When this is the case,~(\ref{fouri:110:57})
%gives an elegant way to calculate an energy spectral density.

% diagn: From here on is largely new.

\subsection{Parseval's theorem}
\label{fouri:110.55}
\index{Parseval's theorem}
\index{Parseval, MarcAntoine (17551836)}
By successive steps,
+Provided that
+\[
+ \Im(v) = 0,
+\]
+one finds that
\bqb
\int_{\infty}^{\infty} h^{*}(v) f(v) \,dv
&=&
@@ 1286,13 +1592,13 @@ By successive steps,
\\&=&
\int_{\infty}^{\infty} \left[
\frac{1}{\sqrt{2\pi}} \int_{\infty}^{\infty} e^{iv\theta} h(v) \,dv
 \right]^{*} F(\theta) \,d\theta.
+ \right]^{*} F(\theta) \,d\theta
\\&=&
\int_{\infty}^{\infty} H^{*}(\theta) F(\theta) \,d\theta,
\eqb
in which we have used~(\ref{fouri:byu}), interchanged the integrations
and assumed that the dummy variables~$v$ and~$\theta$ of integration
remain real. Changing $v \la \theta$ on the right, we have that
+in which an interchange of integrations between two
+applications of~(\ref{fouri:byu}) has been interposed.
+Changing $v \la \theta$ on the right,
\bq{fouri:110:parseval}
\int_{\infty}^{\infty} h^{*}(v) f(v) \,dv =
\int_{\infty}^{\infty} H^{*}(v) F(v) \,dv.
@@ 1301,6 +1607,7 @@ This is \emph{Parseval's theorem.}%
\footnote{
\cite[\S~22]{Couch}\cite[\S~1.6B]{Hsu:comm}
}
+It is related to Parseval's principle of \S~\ref{fours:080}.
\index{time}
\index{energy}
@@ 1316,7 +1623,8 @@ This is \emph{Parseval's theorem.}%
\index{channel}
\index{$I$ channel}
\index{$Q$ channel}
Especially interesting is the special case $h(t) = f(t)$, when
+Especially interesting to Parseval's theorem is the case of
+$h(v) = f(v)$, in which
\bq{fouri:110:parseval2}
\int_{-\infty}^{\infty} \left|f(v)\right|^2 \,dv =
\int_{-\infty}^{\infty} \left|F(v)\right|^2 \,dv.
@@ 1329,20 +1637,20 @@ When this is written as
and~$t$, $\left|f(t)\right|^2$, $\omega$ and $\left|F(\omega)\right|^2$
respectively have physical dimensions of time, energy per unit time,
angular frequency and energy per unit angular frequency, then the
theorem conveys the important physical insight that energy transmitted
at various times can equally well be regarded as energy transmitted at
various frequencies. This works for space and spatial frequencies, too:
see \S~\ref{fours:085}. For real $f(v)$, one can
write~(\ref{fouri:110:parseval2}) as
+theorem conveys the important physical insight that energy transferred
+at various times can equally well be regarded as energy
+transferred \emph{at various frequencies.} This works for space and
+spatial frequencies, too: see \S~\ref{fours:085}. For real $f(v)$, one
+can write~(\ref{fouri:110:parseval2}) as
\bq{fouri:110:parseval2a}
\int_{-\infty}^{\infty} f^2(v) \,dv =
\int_{-\infty}^{\infty} \Re^2[F(v)] \,dv
 + \int_{-\infty}^{\infty} \Im^2[F(v)] \,dv
+ + \int_{-\infty}^{\infty} \Im^2[F(v)] \,dv,\ \ \Im[f(v)] = 0,
\eq
which expresses the principle of \emph{quadrature,} conveying the
additional physical insight that a single frequency can carry energy
in not one but each of two distinct, independent channels; namely, a
\emph{realphased, inphase} or~$I$ channel and an
+in not only one but each of two distinct, independent channels; namely,
+a \emph{realphased, inphase} or~$I$ channel and an
\emph{imaginaryphased, quadraturephase} or~$Q$ channel.%
\footnote{\cite[\S~51]{Couch}}
Practical digital electronic communications systems, wired or wireless,
@@ 1362,18 +1670,25 @@ Symbolically,
\item if $f(-v) = f(v)$ for all~$v$, then $F(-v) = F(v)$;
\item if $f(-v) = -f(v)$ for all~$v$, then $F(-v) = -F(v)$.
\ei
In the odd case, this is seen by expressing $F(v)$ per~(\ref{fouri:byu}) as
+The odd case is proved by expressing $F(-v)$ per~(\ref{fouri:byu}) as
\[
 F(-v) = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty} e^{-i(-v)\theta} f(\theta) \,d\theta,
+ F(-v) = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty} e^{-i(-v)\theta} f(\theta) \,d\theta
\]
then changing the dummy variable $\theta \la \theta$ to get
\bqb
+and then changing $\theta \la \theta$ to get that
+\[
F(v)
 &=& \frac{1}{\sqrt{2\pi}} \int_{\infty}^{\infty} e^{i(v)(\theta)} f(\theta) \,d\theta
 \\&=& \frac{1}{\sqrt{2\pi}} \int_{\infty}^{\infty} e^{iv\theta} f(\theta) \,d\theta
 \\&=& F(v).
\eqb
The even case is analyzed likewise. See \S~\ref{taylor:365}.
+ = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty} e^{-i(-v)(-\theta)} f(-\theta) \,d\theta
+ = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty} e^{-iv\theta}
+ f(-\theta) \,d\theta.
+\]
+That~$f(v)$ should be odd means by definition that $f(-\theta) =
+-f(\theta)$, so
+\[
+ F(-v)
+ = -\frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty} e^{-iv\theta}
+ f(\theta) \,d\theta = -F(v).
+\]
+The even case is proved similarly. See \S~\ref{taylor:365}.
% 
@@ 1384,40 +1699,86 @@ The even case is analyzed likewise. See
\index{sineargument function!Fourier transform of}
\index{Fourier transform!of the sineargument function}
We have already computed the Fourier transforms of $\Pi(v)$,
$\Lambda(v)$, $\delta(v)$ and the constant~$1$ in~(\ref{fouri:100:44}),
(\ref{fouri:100:41}), (\ref{fouri:110:20}) and~(\ref{fouri:110:22}),
respectively. The duals~(\ref{fouri:110:12}) of the first two of these
are evidently
\[
 \begin{split}
 \sinarg\left(\frac v2\right) &\fouripair \left(\sqrt{2\pi}\right)\Pi(v), \\
 \sinarg^2\left(\frac v2\right) &\fouripair \left(\sqrt{2\pi}\right)\Lambda(v);
 \end{split}
\]
or, since $\Pi(v)=\Pi(v)$ and $\Lambda(v)=\Lambda(v)$,
\[
 \begin{split}
 \sinarg\left(\frac v2\right) &\fouripair \left(\sqrt{2\pi}\right)\Pi(v), \\
 \sinarg^2\left(\frac v2\right) &\fouripair \left(\sqrt{2\pi}\right)\Lambda(v),
 \end{split}
\]
which by the scaling property of Table~\ref{fouri:110:tbl20} imply that%
\footnote{
 In electronic communications systems, including radio, the first line
 of~(\ref{fouri:120:20}) implies significantly that, to spread
 energy evenly over an available ``baseband'' but to let no energy
 leak outside that band, one should transmit sineargumentshaped
 pulses as in Fig.~\ref{fours:160:fig}.
+We have already computed the Fourier transforms of several functions
+in \S\S~\ref{fouri:100.40} and~\ref{fouri:110.20}. We have also
+already computed duals of these but would still like to put the duals
+into more pleasing forms. For example, the dual
+of
+\[
+ \Lambda(v) \fouripair \frac{\sinarg^2(v/2)}{\sqrt{2\pi}}
+\]
+is, according to~(\ref{fouri:110:15}),
+\[
+ \frac{\sinarg^2(v/2)}{\sqrt{2\pi}} \fouripair \Lambda(v).
+\]
+The scaling property of~(\ref{fouri:110:33}) and
+Table~\ref{fouri:110:tbl20}, using $\alpha = 2$,
+makes it
+\[
+ \sinarg^2 v \fouripair
+ \frac{\sqrt{2\pi}}{2}\Lambda\left(\frac{v}2\right),
+\]
+which is probably more convenient than~(\ref{fouri:110:15}) to use when
+one meets a $\sinarg^2(\cdot)$ and wishes to transform it.
+
+Tables~\ref{fouri:120:tbl20} and~\ref{fouri:120:tbl25} list the last
+transform pair and others similarly derived.\footnote{%
+ In electronic signaling systems, including radio, the table's
+ transform pair
+ $\sinarg(v) \fouripair \frac{\sqrt{2\pi}}{2}\Pi\left(\frac v2\right)$
+ implies significantly that, to spread energy evenly over an available
+ ``baseband'' but to let no energy leak outside that band, one should
+ transmit sineargumentshaped pulses as in Fig.~\ref{fours:160:fig}.%
}
\bq{fouri:120:20}
 \begin{split}
 \sinarg(v) &\fouripair \frac{\sqrt{2\pi}}{2}\Pi\left(\frac v2\right), \\
 \sinarg^2(v) &\fouripair \frac{\sqrt{2\pi}}{2}\Lambda\left(\frac v2\right).
 \end{split}
\eq
+% diagn: the next sentence, revised, wants review
+The tables list further transform pairs as well---some as gleaned from
+earlier in the chapter; others as computed in the last paragraph's way,
+as adapted by the properties of Table~\ref{fouri:110:tbl20} (especially
+the properties of delay, shift and scaling), or as derived in this
+section's subsections to follow.
+\begin{table}
+ \caption[Fourier transform pairs.]{%
+ Fourier transform pairs. (See also Table~\ref{fouri:120:tbl25}.)%
+ }
+ \label{fouri:120:tbl20}
+ \index{Fourier transform pair}
+ \index{transform pair}
+ \bqb
+ 1 &\fouripair& \left(\sqrt{2\pi}\right) \delta(v) \\
+ u(v) &\fouripair& \frac{1}{\left(\sqrt{2\pi}\right)iv} + \frac{\sqrt{2\pi}}{2} \delta(v) \\
+ \delta(v) &\fouripair& \frac{1}{\sqrt{2\pi}} \\
+ \Pi(v) &\fouripair& \frac{\sinarg(v/2)}{\sqrt{2\pi}} \\
+ \Lambda(v) &\fouripair& \frac{\sinarg^2(v/2)}{\sqrt{2\pi}} \\
+ \Psi(v) &\fouripair& \frac{\sinarg v}{\sqrt{2\pi}\left[1-(v/\pi)^2\right]} \\
+ u(v)e^{-av} &\fouripair& \frac{1}{\left(\sqrt{2\pi}\right)(a+iv)},
+ \ \ \Re(a) > 0 \\
+ u(v)e^{-av}v^n &\fouripair& \frac{n!}{\left(\sqrt{2\pi}\right)(a+iv)^{n+1}}, \\
+ &&\ \ \ \ \ \ \Re(a) > 0,\ n \in \mathbb Z, \ n \ge 0 \\
+ e^{iav} &\fouripair& \left(\sqrt{2\pi}\right) \delta(v-a) \\
+ \sin av &\fouripair& \frac{\sqrt{2\pi}}{j2}\left[ \delta(v-a) - \delta(v+a) \right] \\
+ \cos av &\fouripair& \frac{\sqrt{2\pi}}{2}\left[ \delta(v-a) + \delta(v+a) \right] \\
+ \sinarg(v) &\fouripair& \frac{\sqrt{2\pi}}{2}\Pi\left(\frac v2\right) \\
+ \sinarg^2(v) &\fouripair& \frac{\sqrt{2\pi}}{2}\Lambda\left(\frac v2\right) \\
+ \frac{\sinarg v}{\left[1-(v/\pi)^2\right]} &\fouripair& \left(\sqrt{2\pi}\right)\Psi(v) \\
+ \sum_{j=-\infty}^{\infty} \delta(v-jT_1) &\fouripair&
+ \frac{\sqrt{2\pi}}{T_1} \sum_{j=-\infty}^{\infty} \delta\left(v-j\frac{2\pi}{T_1}\right)
+ \eqb
+\end{table}
+\begin{table}
+ \caption{Fourier autotransform pairs.}
+ \label{fouri:120:tbl25}
+ \index{Fourier autotransform pair}
+ \index{autotransform pair}
+ \bqb
+ \sum_{j=-\infty}^{\infty} \delta\left(v-j\sqrt{2\pi}\right) &\fouripair&
+ \sum_{j=-\infty}^{\infty} \delta\left(v-j\sqrt{2\pi}\right) \\
+ \Omega(v) &\fouripair& \Omega(v)
+ \eqb
+\end{table}
+
+\subsection{Exponential decay and the Heaviside unit step}
+\label{fouri:120.30}
% diagn: heavily revised paragraph wants close review.
\index{Heaviside unit step function!Fourier transform of}
\index{unit step function, Heaviside!Fourier transform of}
\index{Fourier transform!of the Heaviside unit step}
@@ 1427,8 +1788,8 @@ which by the scaling property of Table~\
\index{natural exponential!Fourier transform of}
\index{Fourier transform!of the natural exponential}
Applying the Fourier transform's definition~(\ref{fouri:byu}) to
$u(v)e^{av}$, where $u(v)$ is the Heaviside unit
step~(\ref{integ:670:10}), yields
+$u(v)e^{av}$, where $u(v)$ is Heaviside's unit
+step~(\ref{integ:670:10}), yields that
\[
\mathcal F \left\{u(v)e^{av}\right\} =
\frac{1}{\sqrt{2\pi}}
@@ 1439,7 +1800,7 @@ step~(\ref{integ:670:10}), yields
\frac{1}{\sqrt{2\pi}}
\left[
\frac{e^{(a+iv)\theta}}{(a+iv)}
 \right]_{0}^{\infty},
+ \right]_{\theta=0}^{\infty},
\]
revealing the transform pair
\bq{fouri:120:21}
@@ 1456,7 +1817,7 @@ where the necessary term $C\delta(v)$, w
merely admits that we do not yet know how to
evaluate~(\ref{fouri:120:21}) when both~$a$ and~$v$ vanish at once.
What we do know from \S~\ref{fouri:110.65} is that odd functions have
odd transforms and that (as one can readily see in
+odd transforms and that (as one can see in
Fig.~\ref{integ:670:figu}) one can convert $u(v)$ to an odd function by
the simple expedient of subtracting $1/2$ from it. Since $1/2
\fouripair (\sqrt{2\pi}/2) \delta(v)$ according to~(\ref{fouri:110:22}),
@@ 1472,36 +1833,38 @@ The transform pair
u(v) \fouripair
\frac{1}{\left(\sqrt{2\pi}\right)iv} + \frac{\sqrt{2\pi}}{2} \delta(v)
\eq
results. On the other hand, according to Table~\ref{inttx:470:tbl},
+results.
+
+Invoking~(\ref{fouri:byu}),
\[
 e^{av}v^n
 = \frac{d}{dv} \sum_{k=0}^n
 \frac{e^{av}v^k}{ (n!/k!) a^{nk+1} },
+ u(v)e^{av}v^n
+ \fouripair \frac{1}{\sqrt{2\pi}} \int_0^\infty
+ e^{(a+iv)\theta}\theta^n \,d\theta.
\]
so
\bqb
 \mathcal F \left\{
+Evaluating the antiderivative via Table~\ref{inttx:470:tbl} with
+$\alpha \la (a+iv)$,
+\[
u(v)e^{av}v^n
 \right\}
 &=& \frac{1}{\sqrt{2\pi}} \int_0^\infty
 e^{(a+iv)\theta}\theta^n \,d\theta
 \\&=& \frac{1}{\sqrt{2\pi}} \sum_{k=0}^n \left.
 \frac{ e^{(a+iv)\theta}\theta^k }{ (n!/k!)(a+iv)^{nk+1} }
+ \fouripair \frac{e^{(a+iv)\theta}}{\sqrt{2\pi}} \sum_{k=0}^n \left.
+ \frac{ (n!/k!)\theta^k }{ (a+iv)^{nk+1} }
\right_0^\infty.
\eqb
+\]
Since all but the $k=0$ term vanish, the last equation implies the
transform pair
+transform pair\footnote{\cite[Table~5.2]{Phillips/Parr}}%
\bq{fouri:120:24}
 u(v)e^{av}v^n \fouripair \frac{1}{\sqrt{2\pi}n!(a+iv)^{n+1}},
+ u(v)e^{av}v^n \fouripair \frac{n!}{\sqrt{2\pi}(a+iv)^{n+1}},
\ \ \Re(a) > 0, \ n \in \mathbb Z, \ n \ge 0.
\eq
\index{Fourier transform!of a sinusoid}
+\subsection{Sinusoids}
+\label{fouri:120.31}
+
\index{Fourier transform!of a sinusoid}
\index{sine!Fourier transform of}
\index{cosine!Fourier transform of}
+\index{trigonometric function!Fourier transform of}
The Fourier transforms of $\sin av$ and $\cos av$ are interesting and
important, and can be computed straightforwardly from the pairs
+important. One can compute them from the pairs
\bq{fouri:120:26}
\begin{split}
e^{iav} &\fouripair \left(\sqrt{2\pi}\right) \delta(va), \\
@@ 1510,7 +1873,7 @@ important, and can be computed straightf
\eq
which result by applying to~(\ref{fouri:110:22})
Table~\ref{fouri:110:tbl20}'s property that $e^{iav}f(v) \fouripair
F(va)$. Composing per Table~\ref{cexp:tblprop} the trigonometrics
+F(va)$. Composing by Table~\ref{cexp:tblprop} the trigonometrics
from their complex parts, we have that
\bq{fouri:120:23}
\begin{split}
@@ 1525,6 +1888,9 @@ from their complex parts, we have that
\end{split}
\eq
+\subsection{The Dirac delta pulse train}
+\label{fouri:120.35}
+
\index{pulse train!Fourier transform of}
\index{Dirac delta pulse train!Fourier transform of}
\index{Fourier transform!of a Dirac delta pulse train}
@@ 1542,90 +1908,19 @@ the transform of which according to~(\re
\frac{\sqrt{2\pi}}{T_1} \sum_{j=\infty}^{\infty}
\delta\left(vj\frac{2\pi}{T_1}\right).
\eq
Apparently, the further the pulses of the original train, the closer the
+Apparently, the farther the pulses of the original train, the nearer the
pulses of the transformed train, and vice versa; yet, even when
transformed, the train remains a train of Dirac deltas. Letting $T_1 =
\sqrt{2\pi}$ in~(\ref{fouri:120:25}) we find the pair
+\sqrt{2\pi}$ in~(\ref{fouri:120:25}) yields the pair
\bq{fouri:120:25a}
\sum_{j=\infty}^{\infty} \delta\left(vj\sqrt{2\pi}\right) \fouripair
\sum_{j=\infty}^{\infty} \delta\left(vj\sqrt{2\pi}\right),
\eq
discovering a pulse train whose Fourier transform is itself.
% diagn: check forward references.
Table~\ref{fouri:120:tbl20} summarizes. Besides gathering transform
pairs from this and earlier sections, the table also covers the Gaussian
pulse of \S~\ref{fouri:130}.
\begin{table}
 \caption{Fourier transform pairs.}
 \label{fouri:120:tbl20}
 \index{Fourier transform pair}
 \index{transform pair}
 \bqb
 1 &\fouripair& \left(\sqrt{2\pi}\right) \delta(v) \\
 u(v) &\fouripair& \frac{1}{\left(\sqrt{2\pi}\right)iv} + \frac{\sqrt{2\pi}}{2} \delta(v) \\
 \delta(v) &\fouripair& \frac{1}{\sqrt{2\pi}} \\
 \Pi(v) &\fouripair& \frac{\sinarg(v/2)}{\sqrt{2\pi}} \\
 \Lambda(v) &\fouripair& \frac{\sinarg^2(v/2)}{\sqrt{2\pi}} \\
 u(v)e^{av} &\fouripair& \frac{1}{\left(\sqrt{2\pi}\right)(a+iv)},
 \ \ \Re(a) > 0 \\
 u(v)e^{av}v^n &\fouripair& \frac{1}{\left(\sqrt{2\pi}\right)n!(a+iv)^{n+1}}, \\
 &&\ \ \ \ \ \ \Re(a) > 0,\ n \in \mathbb Z, \ n \ge 0 \\
 e^{iav} &\fouripair& \left(\sqrt{2\pi}\right) \delta(va) \\
 \sin av &\fouripair& \frac{\sqrt{2\pi}}{j2}\left[ \delta(va)  \delta(v+a) \right] \\
 \cos av &\fouripair& \frac{\sqrt{2\pi}}{2}\left[ \delta(va) + \delta(v+a) \right] \\
 \sinarg(v) &\fouripair& \frac{\sqrt{2\pi}}{2}\Pi\left(\frac v2\right) \\
 \sinarg^2(v) &\fouripair& \frac{\sqrt{2\pi}}{2}\Lambda\left(\frac v2\right) \\
 \sum_{j=\infty}^{\infty} \delta(vjT_1) &\fouripair&
 \frac{\sqrt{2\pi}}{T_1} \sum_{j=\infty}^{\infty} \delta\left(vj\frac{2\pi}{T_1}\right) \\
 \sum_{j=\infty}^{\infty} \delta\left(vj\sqrt{2\pi}\right) &\fouripair&
 \sum_{j=\infty}^{\infty} \delta\left(vj\sqrt{2\pi}\right) \\
 \Omega(v) &\fouripair& \Omega(v)
 \eqb
\end{table}

% 

\section[The Fourier transform of integration]%
 {The Fourier transform of the integration operation}
\label{fouri:125}
\index{Fourier transform!of integration}
\index{integration!Fourier transform of}

Though it includes the Fourier transform of the differentiation
operation, Table~\ref{fouri:110:tbl20} omits the complementary
identity
\bq{fouri:125:10}
 \int_{\infty}^v f(\tau) \,d\tau \fouripair
 \frac{F(v)}{iv}
 + \frac{2\pi}{2} F(0) \delta(v),
\eq
the Fourier transform of the integration operation, for when we compiled
the table we lacked the needed theory. We have the theory now.%
\footnote{\cite[Prob.~5.33]{Hsu:sig}}

To develop~(\ref{fouri:125:10}), we begin by observing that one can
express the integration in question in either of the equivalent forms
\[
 \int_{\infty}^v f(\tau) \,d\tau =
 \int_{\infty}^\infty u\left(\frac v2 \tau\right)
 f\left(\frac v2 + \tau\right) \,d\tau.
\]
Invoking an identity of Table~\ref{fouri:110:tbl40} on the rightward
form, then substituting the leftward form, yields the Fourier pair
\bqb
 \int_{\infty}^v f(\tau) \,d\tau &\fouripair&
 \left(\sqrt{2\pi}\right) H(v) F(v), \\
 H(v) &\equiv& \mathcal F\{u(v)\}.
\eqb
But according to Table~\ref{fouri:120:tbl20}, $\mathcal F\{u(v)\} =
1/[(\sqrt{2\pi})iv] + (\sqrt{2\pi}/2)\delta(v)$, so
%\frac{1}{\left(\sqrt{2\pi}\right)iv} + \frac{\sqrt{2\pi}}{2} \delta(v)
\[
 \int_{\infty}^v f(\tau) \,d\tau \fouripair
 \frac{F(v)}{iv} + \frac{2\pi}{2} F(v) \delta(v),
\]
of which sifting the rightmost term produces~(\ref{fouri:125:10}).
+This completes the derivations of the Fourier transform pairs of
+Tables~\ref{fouri:120:tbl20} and~\ref{fouri:120:tbl25}---except one pair.
+The one pair will be the subject of \S~\ref{fouri:130}, next.
% 
@@ 1633,40 +1928,40 @@ of which sifting the rightmost term prod
\label{fouri:130}
\index{Gauss, Carl Friedrich (17771855)}
\index{Gaussian pulse}
\index{pulse, Gaussian}
+\index{pulse!Gaussian}
While studying the derivative in Chs.~\ref{drvtv} and~\ref{cexp}, we
+While studying the derivative in chapters~\ref{drvtv} and~\ref{cexp}, we
asked among other questions whether any function could be its own
derivative. We found that a sinusoid could be its own derivative after
a fashiondifferentiation shifted its curve leftward but did not alter
its shapebut that the only nontrivial function to be exactly its own
derivative was the natural exponential $f(z)=Ae^z$. We later found the
+a fashion---differentiation shifted its curve leftward without altering
+its shape or scalebut that the only nontrivial function to be
+exactly its own derivative was the natural exponential $f(z)=Ae^z$. We
+later found the
same natural exponential to fill several significant mathematical
roleslargely, whether directly or indirectly, because it was indeed
its own derivative.
Studying the Fourier transform, the question now arises again: can any
function be its own transform? Well, we have already found in
\S~\ref{fouri:120} that the Dirac delta pulse train can be; but this
train unlike the natural exponential is abrupt and ungraceful, perhaps
not the sort of function one had in mind. One should like an analytic
function, and preferably not a train but a single pulse.
+As we study the Fourier transform, a similar question arises: can any
+function be its own \emph{transform?} Well, we have already found in
+\S~\ref{fouri:120.35} that the Dirac delta pulse train can be its own
+transform; but this train unlike the natural exponential is
+nonanalytic, perhaps not the sort of function one had in mind. One
+should like an analytic function, and preferably not a train but a
+single pulse.
\index{bell curve}
\index{cleverness}
\index{$\Omega$ as the Gaussian pulse}
In Chs.~\ref{specf} and~\ref{prob}, during the study of special
functions and probability, we shall encounter a most curious function,
+In chapter~\ref{prob}, during the study of the mathematics of
+probability, we shall encounter a most curious function,
the \emph{Gaussian pulse,} also known as the \emph{bell curve} among
other names. We will defer discussion of the Gaussian pulse's
provenance to the coming chapters but, for now, we can just copy here
the pulse's definition
% diagn: check the next reference.
from~(\ref{prob:normdist}) as
\bq{fouri:130:10}
 \Omega(t) \equiv \frac{\exp\left(-t^2/2\right)}{\sqrt{2\pi}},
\eq
% diagn: check the rest of the sentence, which is new.
plotted on pages~\pageref{prob:normdistfig} below
and~\pageref{fours:095:fig1} above, respectively in
Figs.~\ref{prob:normdistfig} and~\ref{fours:095:fig1}.
@@ 1676,7 +1971,7 @@ compute it include the following.%
\footnote{
An alternate technique is outlined in \cite[Prob.~5.43]{Hsu:sig}.
}
From the definition~(\ref{fouri:byu}) of the Fourier transform,
+From the Fourier transform's definition~(\ref{fouri:byu}),
\[
\mathcal F\{ \Omega(v) \}
= \frac{1}{2\pi} \int_{\infty}^{\infty}
@@ 1711,8 +2006,8 @@ should find such an integral hard to int
since happily we have studied it, observing that the integrand
$\exp(\xi^2)$ is an entire function (\S~\ref{taylor:330})
of~$\xi$that is, that it is everywhere analyticwe recognize that
one can trace the path of integration from $\infty+i\theta$ to
$\infty+i\theta$ along any contour one likes. Let us trace it along the
+one can trace the path of integration from $-\infty+iv$ to
+$\infty+iv$ along any contour one likes. Let us trace it along the
real Argand axis from~$-\infty$ to~$\infty$, leaving only the two, short
complex segments at the ends which (as is easy enough to see, and the
formal proof is left as an exercise to the interested reader%
@@ 1732,7 +2027,7 @@ So tracing leaves us with
\,d\xi.
\eq
How to proceed from here is not immediately obvious. None of the
techniques of Ch.~\ref{inttx} seems especially suitable to evaluate
+techniques of chapter~\ref{inttx} seems especially suitable to evaluate
\[
I \equiv \int_{-\infty}^{\infty}
\exp\left( -\frac{\xi^2}{2} \right)
@@ 1758,7 +2053,7 @@ Here is the technique.%
%The author has never heard who first discovered the techniqueit
%might have been Gauss himselfand regrettably has misplaced the
%citation by which he first learned of it in print long ago (see
 %Appendix~\ref{hist}). However, he has since heard the technique
+ %appendix~\ref{hist}). However, he has since heard the technique
%orally explained at least twice, including
%by~\cite{Brownconversation} if memory serves. The technique thus
%would not seem especially obscure. In any event, the technique is now
@@ 1804,8 +2099,8 @@ cylindrical coordinates $(\rho; \phi)$,
\exp\left( -\frac{\rho^2}{2} \right)
\rho\,d\rho \right].
\eqb
At a casual glance, the last integral in square brackets does not look
much different from the integral with which we started, but see: it is
+At a casual glance, the last integral in square brackets seems to
+differ little from the integral with which we started, but see: it is
not only that the lower limit of integration and the letter of the dummy
variable have changed, but that an extra factor of the dummy variable
has appearedthat the integrand ends not with~$d\rho$ but
@@ 1839,14 +2134,14 @@ have that
\]
which in view of~(\ref{fouri:130:10}) reveals the remarkable transform pair
\bq{fouri:130:50}
 \Omega(v) \fouripair \Omega(v),
+ \Omega(v) \fouripair \Omega(v).
\eq
The Gaussian pulse transforms to itself. Old Fourier, who can twist and
knot other curves with ease, seems powerless to bend Gauss' mighty
curve.
\index{Gaussian pulse!to implement the Dirac delta by}
\index{pulse, Gaussian!to implement the Dirac delta by}
+\index{pulse!Gaussian, to implement the Dirac delta by}
\index{Dirac delta function!as implemented by the Gaussian\\pulse} % bad break
\index{delta function, Dirac!as implemented by the Gaussian\\pulse} % bad break
It is worth observing incidentally in light of~(\ref{fouri:130:10})
@@ 1857,7 +2152,7 @@ and~(\ref{fouri:130:25}) that
the same as for $\Pi(t)$, $\Lambda(t)$ and indeed $\delta(t)$.
Section~\ref{fours:095} and its~(\ref{fours:095:33}) have recommended
the shape of the Gaussian pulse, in the tall, narrow limit, to implement
the Dirac delta $\delta(t)$. This section lends a bit more force to the
+the Dirac delta $\delta(t)$. This section lends more force to the
recommendation, for not only is the Gaussian pulse analytic (unlike the
Dirac delta) but it also behaves uncommonly well under Fourier
transformation (like the Dirac delta), thus rendering the Dirac delta
@@ 1878,36 +2173,111 @@ practical mathematical virtues exhibited
The Gaussian pulse resembles the natural exponential in its general
versatility. Indeed, though the book has required several chapters
through this Ch.~\ref{fouri} to develop the fairly deep mathematics
+through this chapter~\ref{fouri} to develop the fairly deep mathematics
underlying the Gaussian pulse and supporting its basic application, now
that we have the Gaussian pulse in hand we shall find that it ably fills
all sorts of rolesnot least the principal role of Ch.~\ref{prob} to
+all sorts of roles---not least the principal role of chapter~\ref{prob} to
come.
% 
+\section[The Fourier transform of integration]%
+ {The Fourier transform of the integration operation}
+\label{fouri:125}
+\index{Fourier transform!of integration}
+\index{integration!Fourier transform of}
+
+Table~\ref{fouri:110:tbl20}, page~\pageref{fouri:110:tbl20}, includes
+one heretofore unproved Fourier property,
+\bq{fouri:125:10}
+ \int_{-\infty}^v f(\tau) \,d\tau \fouripair
+ \frac{F(v)}{iv}
+ + \pi F(0) \delta(v).
+\eq
+This property has remained unproved because, when we compiled the table
+in \S~\ref{fouri:110}, we lacked the needed theory. We have
+the theory now and can proceed with the
+proof.\footnote{\cite[Prob.~5.33]{Hsu:sig}}
+
+The proof begins with the observation that
+\[
+ \int_{-\infty}^v f(\tau) \,d\tau =
+ u(v) \ast f(v) =
+ \int_{-\infty}^\infty u\left(\frac v2 - \tau\right)
+ f\left(\frac v2 + \tau\right) \,d\tau,
+\]
+where the $u(t) \ast f(t)$ exercises the convolution operation of
+\S~\ref{fouri:110.40}. The convolution's correctness is probably easier
+to see if the convolution is expressed according
+to~(\ref{fouri:110:40a}) rather than to~(\ref{fouri:110:40}), as
+\[
+ \int_{-\infty}^v f(\tau) \,d\tau =
+ u(v) \ast f(v) =
+ \int_{-\infty}^\infty u(v - \tau) f(\tau) \,d\tau;
+\]
+but the two forms are equivalent and, since we are free to work with
+either and the earlier is the form that appears in
+Table~\ref{fouri:110:tbl40}, we will prefer the earlier form.
+
+\index{Dirac delta function!sifting property of}
+\index{delta function, Dirac!sifting property of}
+\index{sifting property}
+Table~\ref{fouri:110:tbl40} records the transform pair
+\bqb
+ \lefteqn{
+ \int_{-\infty}^{\infty}
+ u\left(\frac{v}{2}-\tau\right)
+ f\left(\frac{v}{2}+\tau\right)
+ \,d\tau}
+ &&\\
+ &\fouripair&
+ \left(\sqrt{2\pi}\right) \mathcal F\{u(v)\} F(v) =
+ \left[\frac{1}{iv} + \pi \delta(v)\right] F(v)
+\eqb
+in which $\mathcal F\{u(v)\}$ is evaluated according to
+Table~\ref{fouri:120:tbl20}. Substituting the observation with which we
+began,
+\[
+ \int_{-\infty}^v f(\tau) \,d\tau =
+ u(v) \ast f(v)
+ \fouripair
+ \left[\frac{1}{iv} + \pi \delta(v)\right] F(v).
+\]
+Sifting, $\delta(v)F(v) = \delta(v)F(0)$, so the last pair
+is in fact~(\ref{fouri:125:10}) which was to be proved.
+
+A fact that has incidentally emerged during the proof,
+\bq{fouri:125:20}
+ \int_{-\infty}^v f(\tau) \,d\tau = u(v) \ast f(v),
+\eq
+is interesting enough to merit here an equation number of its own.
+
+% 
+
\section{The Laplace transform}
\label{fouri:200}
\index{Laplace transform}
\index{transform!Laplace}
\index{Laplace, PierreSimon (17491827)}
Equation~(\ref{fouri:byu2}), defining the Fourier transform in
the~$\mathcal{F}_{\omega t}$ notation, transforms pulses
like those of Figs.~\ref{fouri:100:fig} and~\ref{fours:095:fig1}
straightforwardly but stumbles on timeunlimited functions like $f(t) =
+Fourier straightforwardly transforms pulses like those of
+Figs.~\ref{fouri:100:fig} (page
+% bad break
+\pageref{fouri:100:fig})
+and~\ref{fours:095:fig1} (page~\pageref{fours:095:fig1}) but stumbles
+on timeunlimited functions like $f(t) =
\cos \omega_o t$ or even the ridiculously simple $f(t)=1$. Only by the
indirect techniques of \S\S~\ref{fouri:110} and~\ref{fouri:120} have we
been able to transform such functions. Such indirect techniques are
valid and even interesting, but nonetheless can prompt the tasteful
mathematician to wonder whether a simpler alternative to the Fourier
transform were not possible.
+clever, indirect techniques of \S\S~\ref{fouri:110} and~\ref{fouri:120}
+has Fourier been able to transform such functions at all. Fourier's
+clever, indirect techniques are valid and even interesting but can still
+prompt a mathematician to wonder whether a simpler alternative to Fourier
+did not exist.
At the sometimes acceptable cost of omitting one of the Fourier
integral's two tails,%
\footnote{
There has been invented a version of the Laplace transform which omits
 no tail~\cite[Ch.~3]{Hsu:sig}. This book does not treat it.
+ no tail~\cite[chapter~3]{Hsu:sig}. This book does not treat it.
}
the \emph{Laplace transform}
\bq{fouri:laplace}
@@ 1918,47 +2288,74 @@ offers such an alternative. Here, $s=j\
variable and, when~$s$ is purely imaginary, the Laplace transform is
very like the Fourier; but Laplace's advantage lies in that it
encourages the use of a complex~$s$, usually with a negative real part,
which in~(\ref{fouri:laplace})'s integrand suppresses even the tail the
transform does not omit, thus effectively converting even a
timeunlimited function to an integrable pulseand Laplace does so
without resort to indirect techniques.%
\footnote{\cite[Ch.~3]{Hsu:sig}\cite[Ch.~7]{Phillips/Parr}\cite[Ch.~19]{JJH}}

As we said, the Laplace transform's
definition~(\ref{fouri:laplace})quite unlike the Fourier transform's
definition~(\ref{fouri:byu2})tends to transform simple functions
straightforwardly. For instance, the pair
+which in~(\ref{fouri:laplace})'s integrand tends to suppress even the
+tail not omitted, thus effectively converting a timeunlimited function
+to an integrable pulseand Laplace does all this without resort to
+indirect techniques.%
+\footnote{\cite[chapter~3]{Hsu:sig}\cite[chapter~7]{Phillips/Parr}\cite[chapter~19]{JJH}}
+
+\index{limit of integration}
+\index{$0^{}$ and~$0^{+}$ as limits of integration}
+The lower limit~$0^{-}$ of $\int_{0^{-}}^\infty$ integration means that
+the integration includes $t=0$. In other words,
+$\int_{0^{-}}^\infty \delta(t)dt = 1$ whereas
+$\int_{0^{+}}^\infty \delta(t)dt = 0$.
+
+\index{pattern!filling of}
+\index{filling of a pattern}
+\index{Laplace transform!of an integral}
+\index{integral!Laplace transform of}
+\index{Laplace transform!of a derivative}
+\index{derivative!Laplace transform of}
+Timeunlimited functions Laplace can straightforwardly transform
+by
+% bad break
+(\ref{fouri:laplace})---without resort to clever, indirect
+techniques---include
\[
1 \laplair \frac{1}{s}.
\]
(in which one can elaborate the symbol~$\mathcal L$
as~$\mathcal{L}_{st}$ if desired) comes by direct application of the
definition. The first several of the Laplace properties of
Table~\ref{fouri:laplaceproperties} likewise come by direct
application. The differentiation property comes by
\[
 \mathcal L\left\{ \frac{d}{dt} f(t) \right\}
 = \int_{0^{}}^\infty e^{st} \frac{d}{dt} f(t) \,dt
 = \int_{0^{}}^\infty e^{st} \,d\left[f(t)\right]
\]
and thence by integration by parts (\S~\ref{inttx:230}); whereafter the
higherorder differentiation property comes by repeated application of
the differentiation property.
The integration property merely reverses the integration property on the
function $g(t) \equiv \int_{0^{}}^\infty f(\tau) \,d\tau$, for which
$dg/dt = f(t)$ and $g(0^{}) = 0$. The ramping property comes by
+Others are listed in Table~\ref{fouri:laplaceproperties}.
+Laplace \emph{properties}, some of which are derived in the same easy
+way, are listed in Table~\ref{fouri:laplacepairs}\@. Further
+Laplace properties, also listed in the table, want some technique to
+derive, for instance the differentiation property, which comes by
+\[
+ \begin{split}
+ \mathcal L\left\{ \frac{d}{dt} f(t) \right\}
+ &= \int_{0^{-}}^\infty e^{-st} \frac{d}{dt} f(t) \,dt
+ = \int_{t=0^{-}}^\infty e^{-st} \,d\left[f(t)\right]
+ \\&=
+ \left. e^{-st} f(t) \right|_{0^{-}}^\infty
+ + s \int_{0^{-}}^\infty e^{-st} f(t)\,dt
+ \\&= -f(0^{-}) + sF(s)
+ \end{split}
+\]
+via the byparts method of \S~\ref{inttx:230}.
+The integration property merely reverses the differentiation property on the
+function $g(t) \equiv \int_{0^{-}}^t f(\tau) \,d\tau,$
+for which $g(0^{-}) = 0$, filling the~``?'' with $F(s)/s$ in this pattern:%
+{
+ \settowidth\tla{$\ds\int_{0^{}}^t f(\tau) \,d\tau$}
+ \settowidth\tlb{$\ds 0 + sG(s)$}
+ \bqb
+ g(t) = \int_{0^{}}^t f(\tau) \,d\tau &\fouripair& \makebox[\tlb][l]{$\ds G(s)$} = \ ?\ ; \\
+ \frac{dg}{dt} = \makebox[\tla][r]{$\ds f(t)$} &\fouripair& 0 + sG(s) = F(s).
+ \eqb
+}%
+The ramping property comes by
differentiating and negating~(\ref{fouri:laplace}) as
\[
-\frac{d}{ds} F(s)
= -\frac{d}{ds} \int_{0^{-}}^\infty e^{-st} f(t) \,dt
= \int_{0^{-}}^\infty e^{-st} [tf(t)] \,dt
 = \mathcal L \{tf(t)\};
+ = \mathcal L \{tf(t)\}.
\]
whereafter again the higherorder property comes by repeated
+Higherorder properties come by repeated
application. The convolution property comes as it did in
\S~\ref{fouri:110.40} except that here we take advantage the
presence of Heaviside's unit step to manipulate the limits of
integration, beginning
+\S~\ref{fouri:110.40} except that here we take advantage of the
+presence of Heaviside's unit step $u(\cdot)$ to manipulate the limits
+of integration, beginning
\bqb
\lefteqn{
\mathcal{L}\left\{
@@ 1981,7 +2378,7 @@ integration, beginning
\eqb
wherein evidently $u(t/2-\psi)u(t/2+\psi)=0$ for all $t < 0$, regardless
of the value of~$\psi$. As in \S~\ref{fouri:110.40}, here also we
change $\phi \la t/2 + \psi$ and $\chi \la \phi - 2\psi$, eventually
+change $\phi \la t/2 + \psi$ and $\mu \la \phi - 2\psi$, eventually
reaching the form
\bqb
\lefteqn{
@@ 1996,7 +2393,7 @@ reaching the form
}
&&\\&=&
\left[
 \int_{-\infty}^{\infty} e^{-s\chi} u(\chi)h(\chi) \,d\chi
+ \int_{-\infty}^{\infty} e^{-s\mu} u(\mu)h(\mu) \,d\mu
\right]
\left[
\int_{-\infty}^{\infty} e^{-s\phi} u(\phi)f(\phi) \,d\phi
@@ 2011,18 +2408,16 @@ thus completing the convolution property
\bqb
u(t-t_o)f(t-t_o) &\laplair& e^{-st_o}F(s) \\
e^{-at}f(t) &\laplair& F(s+a) \\
 Af(\alpha t) &\laplair& \frac{A}{\alpha}F\left(\frac{s}{\alpha}\right)
 \ \ \mbox{if $\Im(\alpha)=0$, $\Re(\alpha) > 0$} \\
+ Af(\alpha t) &\laplair& \frac{A}{\alpha}F\left(\frac{s}{\alpha}\right),
+ \ \Im(\alpha)=0,\ \Re(\alpha) > 0 \\
A_1f_1(t) + A_2f_2(t) &\laplair& A_1F_1(t) + A_2F_2(t) \\
\frac{d}{dt}f(t) &\laplair& sF(s) - f(0^{-}) \\
+ \frac{d^2}{dt^2}f(t) &\laplair& s^2F(s) - sf(0^{-}) - \left[\frac{df}{dt}\right]_{t=0^{-}} \\
\frac{d^n}{dt^n}f(t) &\laplair& s^nF(s)
 - \sum_{k=0}^{n-1}\left\{s^k\left[\frac{d^{n-1-k}}{dt^{n-1-k}}f(t)\right]_{t=0^{-}}\right\} \\
 \int_{0^{-}}^\infty f(\tau) \,d\tau &\laplair& \frac{F(s)}{s} \\
+ \int_{0^{-}}^t f(\tau) \,d\tau &\laplair& \frac{F(s)}{s} \\
tf(t) &\laplair& -\frac{d}{ds}F(s) \\
t^nf(t) &\laplair& (-)^n\frac{d^n}{ds^n}F(s) \\
 % I lack a ready proof of the next property, commented out.
 % It's probably not important enough for this table, anyway.
 %\int_{0^{-}}^t f(\tau) \,d\tau &\laplair& \frac{F(s)}{s} \\
\left[u(t)h(t)\right] \ast \left[u(t)f(t)\right] &\laplair& H(s) F(s)
\eqb
\end{table}
@@ 2033,8 +2428,11 @@ thus completing the convolution property
\[
\renewcommand\arraystretch{2.0}
\br{rclcrcl}
 \delta(t) &\laplair& 1 &\ \ &
 && \\
+ &&&&
+ \ds e^{-at}t\sin\omega_o t &\laplair& \ds \frac{2(s+a)\omega_o}{[(s+a)^2+\omega_o^2]^2} \\
+ \delta(t) &\laplair& 1 &&
+ % diagn: the pair on the next line has been corrected; check it
+ \ds e^{-at}t\cos\omega_o t &\laplair& \ds \frac{(s+a)^2-\omega_o^2}{[(s+a)^2+\omega_o^2]^2} \\
\ds 1 &\laplair& \ds \frac{1}{s} &&
\ds e^{-at} &\laplair& \ds \frac{1}{s+a} \\
\ds t &\laplair& \ds \frac{1}{s^2} &&
@@ 2048,21 +2446,24 @@ thus completing the convolution property
\er
\]
\end{table}
As Table~\ref{fouri:laplaceproperties} lists Laplace properties, so
Table~\ref{fouri:laplacepairs} lists Laplace transform pairs. As the
former table's, most too of the latter table's entries come by
direct application of the Laplace transform's
definition~(\ref{fouri:laplace}) (though to reach the sine and cosine
entries one should first split the sine and cosine functions per
Table~\ref{cexp:tblprop} into their complex exponential components).
The pair $t \laplair 1/s^2$ comes by application of the property that
+Splitting the sine and cosine functions into their complex exponential
+components according to Table~\ref{cexp:tblprop}, application of
+Laplace's definition
+\linebreak % bad break
+(\ref{fouri:laplace})
+to each component yields
+Table~\ref{fouri:laplacepairs}'s sine and cosine pairs.
+The pair $t \laplair 1/s^2$ of the latter table
+comes by application of the property that
$tf(t) \laplair -(d/ds)F(s)$ to the pair $1 \laplair 1/s$, and the pair
$t^n \laplair n!/s^{n+1}$ comes by repeated application of the same
property. The pairs transforming $e^{-at}t$ and $e^{-at}t^n$ come
similarly.
+property. The pairs transforming $e^{-at}t\sin\omega_o t$,
+$e^{-at}t\cos\omega_o t$, $e^{-at}t$ and $e^{-at}t^n$ come similarly.
In the application of either table,~$a$ may be, and~$s$ usually is,
complex, but~$\alpha$ and~$t$ are normally real.
+During the application of either
+Table~\ref{fouri:laplaceproperties} or
+Table~\ref{fouri:laplacepairs},~$a$ may be, and~$s$ usually is,
+complex; but~$\alpha$, $\omega_o$ and~$t$ are normally real.
% 
@@ 2078,44 +2479,43 @@ complex, but~$\alpha$ and~$t$ are normal
\index{domain!transform}
\index{Laplace transform!comparison of against the Fourier transform}
\index{Fourier transform!comparison of against the Laplace transform}
The Laplace transform is curious, but admittedly one often
finds in practice that the more straightforwardthough harder to
analyzeFourier transform is a better tool for frequencydomain
analysis, among other reasons because Fourier brings an inverse
transformation formula~(\ref{fouri:byu2}) whereas Laplace does not.%
\footnote{
 Actually, formally, Laplace does bring an uncouth contourintegrating
 inverse transformation formula in footnote~\ref{fouri:250:fn30}, but
 we'll not use it.
+The Laplace transform is curious, but Fourier is admittedly
+more straightforward even if it is harder to
+analyze. Though exceptions exist, the Fourier transform is probably
+the better general tool for frequencydomain analysis, among other
+reasons because Fourier brings an inverse transformation
+formula~(\ref{fouri:byu2}) whereas Laplace does
+not.\footnote{%
+ Actually, formally, Laplace does support an
+ inverse transformation formula, $u(t)f(t) = (1/i2\pi)
+ \int_{-i\infty}^{i\infty} e^{st} F(s)\,ds$, but to apply this
+ inverse requires contour integration~\cite[eqn.~7.2]{Phillips/Parr}\@.
+ The writer has no experience with it. We'll not use it. It
+ comes of changing $s \la i\omega$ in~(\ref{fouri:eqn}).%
}
This depends on the application. However, by the way, another use use
for the Laplace transform happens to arise. The latter use emerges from
the Laplace property of Table~\ref{fouri:laplaceproperties} that
$(d/dt)f(t) \laplair sF(s)  f(0^{})$, according to which, evidently,
\emph{differentiation in the time (untransformed) domain corresponds to
multiplication by the transform variable~$s$ in the frequency
(transformed) domain.}
\index{initial condition}
Now, one might say the same of the Fourier transform, for it has a
differentiation property, too, $(d/dv)f(v) \fouripair ivF(s)$, which
looks rather alike. The difference however lies in Laplace's extra
Laplace term $f(0^{})$ which, significantly, represents the
untransformed function's initial condition.
+Laplace excels Fourier in its property of
+Table~\ref{fouri:laplaceproperties} that $(d/dt)f(t) \laplair sF(s) -
+f(0^{-})$. Fourier's corresponding property of
+Table~\ref{fouri:110:tbl20} lacks the $f(0^{-})$, an initial condition.
\index{state space}
To see the significance, consider for example the linear differential
+To see why this matters, consider for example the linear differential
equation%
\footnote{\cite[Example~19.31]{JJH}}$\mbox{}^{,}$%
\footnote{
 It is rather enlightening to study the same differential equation,
 written in the \emph{statespace} style \cite[Ch.~8]{Phillips/Parr}
+\footnote{\index{state space}%
+ It is enlightening to study the same differential equation
+ in \emph{statespace} style \cite[chapter~8]{Phillips/Parr},
\[
\frac{d}{dt}\ve f(t) = \mf{rr}{ 0 & 1 \\ -3 & -4 } \ve f(t)
+ \mf{c}{0 \\ e^{-2t}},\ \ %
\ve f(0) = \mf{c}{1 \\ 2},
\]
 where $\ve f(t) \equiv [1\;d/dt]^{T}f(t)$. The effort required to
+ where
+ \[
+ \ve f(t) \equiv \mf{c}{1\\d/dt}f(t).
+ \]
+ The effort required to
assimilate the notation rewards the student with significant insight
into the manner in which initial conditionshere symbolized $\ve
f(0)$determine a system's subsequent evolution.
@@ 2158,7 +2558,7 @@ Combining like terms,
\]
Multiplying by $s+2$ and rearranging,
\[
 (s+2)(s^2+4s+3)F(s) = s^2+8s+13.
+ (s+2)(s^2+4s+3)F(s) = s^2+8s+\mbox{0xD}.
\]
Isolating the heretofore unknown frequencydomain function $F(s)$,
\[
@@ 2180,42 +2580,12 @@ Table~\ref{fouri:laplacepairs}, term by
\[
f(t) = 3e^{-t} - e^{-2t} - e^{-3t}
\]
results to the differential equation with which we started.%
\footnote{\label{fouri:250:fn30}%
 The careful reader might object that we have never proved that the
 Laplace transform cannot map distinct timedomain functions atop one
 another in the frequency domain; that is, that we have never shown
 the Laplace transform to be invertible. The objection has merit.
 Consider for instance the timedomain function $f_2(t) = u(t)[3e^{t}
  e^{2t}  e^{3t}]$, whose Laplace transform does not differ from
 that of $f(t)$.

 However, even the careful reader will admit that the suggested
 $f_2(t)$ differs from $f(t)$ only over $t < 0$, a domain Laplace
 ignores. What one thus ought to ask is whether the Laplace transform
 can map timedomain functions, the functions being \emph{distinct for
 $t \ge 0$,} atop one another in the frequency domain.

 In one sense it may be unnecessary to answer even the latter question,
 for one can check the correctness, and probably also the sufficiency,
 of any solution Laplace might offer to a particular differential
 equation by the expedient of substituting the solution back into the
 equation. However, one can answer the latter question formally
 nonetheless by changing $s \la i\omega$ in~(\ref{fouri:eqn}) and
 observing the peculiar, contourintegrating inverse of the Laplace
 transform, $f(t) = (1/i2\pi) \int_{i\infty}^{i\infty} e^{st} F(s)
 \,ds$, which results \cite[eqn.~7.2]{Phillips/Parr}\@. To consider
 the choice of contours of integration and otherwise to polish the
 answer is left as an exercise to the interested reader; here it is
 noted only that, to cause the limits of integration involved to behave
 nicely, one might insist as a precondition to answering the question
 something like that $f(t) = 0$ for all $t<0$, the precondition being
 met by any $f(t) = u(t)g(t)$ (in which $u[t]$ is formally defined for
 the present purpose such that $u[0] = 1$).
}
+results.
+One can verify the solution by substituting it back into the
+differential equation.
+%Laplace has fathomed the differential equation.
This section's Laplace technique neatly solves many linear differential
equations.
+Laplace can solve many linear differential equations in this way.
% 
@@ 2235,9 +2605,7 @@ The Laplace transform's \emph{initial}
\lim_{t\ra\infty} f(t) &= \lim_{s \ra 0} sF(s),
\end{split}
\eq
meet this want. (Note that these are not transform pairs as in
Tables~\ref{fouri:laplaceproperties} and~\ref{fouri:laplacepairs} but
actual equations.)
+meet this want.
One derives the initialvalue theorem by the successive steps
\bqb
@@ 2291,17 +2659,20 @@ transformation, into the more general, s
\footnote{
The choice of sign here is a matter of convention, which differs by
discipline. This book tends to reflect its author's preference for
 $e^{i(\omega t + \ve k\cdot\ve r)}$, convenient in electrical
+ $f(\ve r,t) \sim \int e^{i(+\omega t - \ve
+ k\cdot\ve r)}F(\ve k, \omega)d\omega\,d\ve k$, convenient in electrical
modeling but slightly less convenient in quantummechanical work.
}
$e^{i(\pm\omega t \mp\ve k\cdot\ve r)}$; where~$\ve k$ and~$\ve r$ are
threedimensional geometrical vectors and~$\ve r$ in particular
represents a position in space. To review the general interpretation
and use of such a factor lies beyond the chapter's scope but the
factor's very form,
+and use of such a factor lies beyond the
+% diagn: on the next line, chapter's or book's?
+book's
+scope but the factor's very form,
\[
 e^{i(\mp\omega t \pm\ve k\cdot\ve r)}
 = e^{i(\mp\omega t \pm k_xx \pm k_yy \pm k_zz)},
+ e^{i(\pm\omega t \mp\ve k\cdot\ve r)}
+ = e^{i(\pm\omega t \mp k_xx \mp k_yy \mp k_zz)},
\]
suggests Fourier transformation with respect not merely to time but also
to space. There results the \emph{spatial Fourier transform}
diff pruN 0.53.201204142/tex/fours.tex 0.56.20180123.12/tex/fours.tex
 0.53.201204142/tex/fours.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/fours.tex 20180123 23:52:17.000000000 +0000
@@ 4,11 +4,13 @@
\index{series!Fourier}
\index{Fourier, Jean Baptiste Joseph\\(17681830)} % bad break
+% This chapterleading paragraph is a bit arch, but I've not yet
+% been able to think of anything better to write here.
\index{Kelvin, Lord (18241907)}
It might fairly be said that, among advanced mathematical techniques,
+It might be said that, among advanced mathematical techniques,
none is so useful, and few so appealing, as the one Lord Kelvin has
acclaimed ``a great mathematical poem.''%
\footnote{\cite[Ch.~17]{JJH}}
+\footnote{\cite[chapter~17]{JJH}}
It is the Fourier transform, which this chapter and the next will develop.
This first of the two chapters brings the Fourier transform in its
primitive guise as the \emph{Fourier series.}
@@ 20,7 +22,7 @@ primitive guise as the \emph{Fourier ser
\index{waveform!repeating}
\index{repeating waveform}
\index{period}
The Fourier series is an analog of the Taylor series of Ch.~\ref{taylor}
+The Fourier series is an analog of the Taylor series of chapter~\ref{taylor}
but meant for \emph{repeating waveforms,} functions $f(t)$ of which
\bq{fours:000:11}
f(t) = f(t+nT_1), \ \Im(T_1) = 0, \ T_1 \neq 0,
@@ 82,8 +84,8 @@ Fig.~\ref{fours:000:fig10}.
\ec
\end{figure}
A Fourier series expands such a repeating waveform as a superposition
of complex exponentials or, alternately, if the waveform is real, of
sinusoids.
+of complex exponentials (or, equivalently, if the waveform is real, as a
+superposition of sinusoids).
\index{waveform!approximation of}
\index{primary frequency}
@@ 246,11 +248,13 @@ or alternately in discrete form as that
Because the product $\Delta\omega\, T_1 = 2\pi$ relates~$\Delta\omega$
to~$T_1$, the symbols~$\Delta\omega$ and~$T_1$ together represent
in~(\ref{fours:080:08}) and~(\ref{fours:080:10}) not two but only
one independent parameter. If~$T_1$ bears physical units then these
typically will be units of time (seconds, for instance),
+one parameter; you cannot set them independently, for the one merely
+inverts the other.
+If~$T_1$ bears physical units then these
+will typically be units of time (seconds, for instance),
whereupon~$\Delta\omega$ will bear the corresponding units of angular
frequency (such as radians per second). The frame offset~$t_o$ and the
dummy variable~$\tau$ naturally must have the same dimensions%
+dummy variable~$\tau$ must have the same dimensions%
\footnote{
The term \emph{dimension} in this context refers to the kind of
physical unit. A quantity like~$T_1$ for example, measurable in seconds
@@ 259,8 +263,7 @@ dummy variable~$\tau$ naturally must hav
time can be expressed in miles per hour as well as in meters per
second but not directly, say, in volts per centimeter; and so on.
}%
~$T_1$ has and normally will bear the same units. This matter is
discussed further in \S~\ref{fours:085}.
+~$T_1$ has. This matter is discussed further in \S~\ref{fours:085}.
\index{physical insight}
\index{insight}
@@ 279,7 +282,7 @@ precisely, eqn.~\ref{fours:080:15} repre
balanced, uniformly spaced selection of phases). An appeal to symmetry
forbids such sums from favoring any one phase $n \,\Delta\omega\, \tau$
or $2\pi nj/N$ over any other. This being the case, how could the sums
of~(\ref{fours:080:10}) and~(\ref{fours:080:15}) ever come to any totals
+of~(\ref{fours:080:10}) and~(\ref{fours:080:15}) come to any totals
other than zero? The plain answer is that they can come to no other
totals. A step in every direction is indeed no step at all. This is
why~(\ref{fours:080:10}) and~(\ref{fours:080:15}) are so.%
@@ 303,6 +306,8 @@ why~(\ref{fours:080:10}) and~(\ref{fours
We have actually already met Parseval's principle, informally, in
\S~\ref{inttx:260.20}.
+There is also \emph{Parseval's theorem} to come in
+\S~\ref{fouri:110.55}.
One can translate Parseval's principle from the Argand realm to the
analogous realm of geometrical vectors, if needed, in the obvious way.
@@ 351,12 +356,13 @@ expressing the useful concept of the rat
example, an internalcombustion engine whose crankshaft revolves once
every~20 millisecondswhich is to say, once every $1/3000$ of a
minuteruns thereby at a frequency of~3000 revolutions per
minute (RPM). Frequency however comes in two styles: cyclic frequency (as in
+minute (RPM) or, in other words,~3000 cycles per minute.
+Frequency however comes in two styles: cyclic frequency (as in
the engine's example), conventionally represented by letters like~$\nu$
and~$f$; and angular frequency, by letters like~$\omega$ and~$k$.
If~$T$, $\nu$ and~$\omega$ are letters taken to stand respectively for a
period of time, the associated cyclic frequency and the associated
angular frequency, then by definition
+period of time, for the associated cyclic frequency, and for the
+associated angular frequency, then by definition
\bq{fours:085:10}
\begin{split}
\nu T &= 1, \\
@@ 364,61 +370,184 @@ angular frequency, then by definition
\omega &= 2\pi \nu.
\end{split}
\eq
The period~$T$ will have dimensions of time like seconds. The cyclic
frequency~$\nu$ will have dimensions of inverse time like hertz (cycles
per second).%
\footnote{
+The period~$T$ will bear units of time like seconds or minutes. The cyclic
+frequency~$\nu$ will bear units of inverse time like cycles
+per second (hertz)
+or cycles per minute.%
+\footnote{%
Notice incidentally, contrary to the improper verbal usage one
sometimes hears, that there is no such thing as a ``hert.'' Rather,
``Hertz'' is somebody's name. The uncapitalized form ``hertz'' thus
 is singular as well as plural.
+ is singular as well as plural.%
}
The angular frequency~$\omega$ will have dimensions of
inverse time like radians per second.
+The angular frequency~$\omega$ will bear units of
+inverse time like radians per second or radians per minute.
\index{countability}
\index{baseball}
\index{cycle}
\index{radian}
\index{second}
The applied mathematician should make himself aware, and thereafter keep
in mind, that the cycle per second and the radian per second do
not differ dimensionally from one another. Both are technically units
of $[\mbox{second}]^{1}$, whereas the words ``cycle'' and ``radian'' in
the contexts of the phrases ``cycle per second'' and ``radian per
second'' are verbal cues that, in and of themselves, play no actual part
in the mathematics. This is not because the cycle and the radian were
ephemeral but rather because the second is unfundamental. The second is
an arbitrary unit of measure. The cycle and the radian are definite,
+\index{minute}
+\index{unit!of measure}
+\index{measure, unit of}
+The last brings us to a point that has confused many students of
+science and engineering: if $T=20\ \mbox{ms}$ and $\nu=3000\
+\mbox{cycles}/\mbox{min}$, then why not $\nu
+T = (3000\:\mbox{cycles}/\mbox{min})(20\:\mbox{ms}) =
+6.0\times 10^4\:
+\mbox{cycle}\cdot\mbox{ms}/\mbox{min}=1\:\mbox{cycle}\neq 1?$
+The answer is that a cycle is not conventionally held
+to be a unit of measure and, thus, does not conventionally enter
+into the arithmetic.\footnote{%
+ % diagn: review this new footnote
+ The writer believes the convention to be wise. The reason behind the
+ convention is not easy to articulate (though the narrative will try
+ to articulate it, anyway), but experience does seem to support the
+ convention nevertheless. Little is gained, and convenience is lost,
+ when onecontrary to conventiontreats a countable entity like a
+ cycle as one would treat an arbitrary quantity of physical reference
+ like a second. The cycle and the second are not things of the same
+ kind. As such, they tend not to approve treatment of the same kind,
+ even if such treatment is possible.%
+}
+Minimally correct usage is rather that
+\[
+ \nu T =
+ (3000\:\mbox{min}^{-1})(20\:\mbox{ms}) =
+ 6.0\times 10^4\frac{\mbox{ms}}{\mbox{min}}=1.
+\]
+As for the word ``cycle,'' one can include the word in the above line if
+one wishes to include it, but with the understanding that the word lacks
+arithmetic significance. Arithmetically, one can drop the word at any
+stage.
+
+\index{countability}
+\index{baseball}
+It follows, perhaps unexpectedly,
+that the cycle per minute and the radian per minute do
+not arithmetically differ from one another. Arithmetically,
+counterintuitively,
+\[
+ \frac{1}{60}\,\frac{\mbox{cycle}}{\mbox{second}} =
+ 1\frac{\mbox{cycle}}{\mbox{minute}} =
+ 1\frac{\mbox{radian}}{\mbox{minute}} =
+ \frac{1}{60}\,\frac{\mbox{radian}}{\mbox{second}}.
+\]
+This looks obviously wrong, of course, but don't worry: it is a mere
+tautology which, though perhaps literally accurate (we will explain why),
+expresses no very profound idea. Its sole point is that both the
+cycle per minute and the radian per minute, interpreted as units of
+measure, are units of $[\mbox{minute}]^{-1}$; whereas---in the context
+of phrases like ``cycle per minute'' and ``radian per minute''---the
+words ``cycle'' and ``radian'' are verbal cues that, in and of
+themselves, play no actual part in the mathematics. \emph{This is not
+because the cycle and the radian were ephemeral but rather because the
+minute is unfundamental.}
+
+The minute, a unit of measure representing a definite but arbitrary
+quantity of time, requires arithmetical representation. The
+cycle and the radian, by contrast, are nonarbitrary,
discrete, inherently countable things; and, where things are counted, it
is ultimately up to the mathematician to interpret the count (consider
for instance that nine baseball hats may imply nine baseball players and
+for instance that nine baseball caps may imply nine baseball players and
one baseball team, but that there is nothing in the number nine itself
to tell us so). To distinguish angular frequencies from cyclic
frequencies, it remains to the mathematician to lend factors of~$2\pi$
where needed.
+If, nevertheless, you think the last displayed equation just too weird,
+then
+don't write it that way; but think of, say, gear ratios. A gear ratio
+might be 3:1 or 5:1 or whatever, but the ratio is unitless. You can
+\emph{say}~``3.0 turns of the small gear per turn of the large gear,''
+but this manner of speaking
+does not make the ``turn of the small gear per turn of the large
+gear'' a proper unit of measure. The ``cycle per radian'' is in the
+same sense likewise not a proper unit of measure. (Now, if you
+\emph{still} think the last displayed equation just too
+weird---well, it \emph{is} weird. You can ignore the equation, instead
+interpreting the expression ``cycle per radian'' as a way of naming the
+number~$2\pi$. This sort of works, but beware that such an interpretation
+does not extend very well to other nonunit units like ``decibel'' and is
+not the interpretation the writer recommends. Also beware: an
+expression like $\sin\{[2\pi/4]\:\mbox{radians}\} =
+\sin[2\pi/4] = 1$ means something sensible whereas an expression like
+$\sin\{[2\pi/4]\:\mbox{minutes}\} = \mbox{??}$ probably does
+not.\footnote{The word ``minute'' here refers to the
+period of time. There is, separately, an angle called by the same
+English word, ``minute,'' but that is not what we are talking about
+here.} Anyway, if ``radian'' is taken to be~1as it must be taken if
+$\sin\{[2\pi/4]\:\mbox{radians}\}$ is to come out rightthen ``cycle''
+must be taken to be~$2\pi$, which does not quite square with
+eqn.~\ref{fours:085:10}, does it? No, the problem is that the radian
+and the cycle are no units of measure.)\footnote{%
+ Some recent undergraduate engineering textbooks have taken to
+ the style of
+ \[
+ E = \frac{Q}{Cd}\ [\mbox{volts/meter}].
+ \]
+ The intent seems to be to encourage undergraduates to include units of
+ measure with their engineering quantities, as
+ \[
+ E = \frac{Q}{Cd} = 5.29\:\mbox{volts}/\mbox{meter}.
+ \]
+ Unfortunately, my own, occasional experience at teaching
+ undergraduates suggests that undergraduates tend to read the textbook
+ as though it had read
+ \[
+ E = \left[\frac{Q}{Cd}\right]\left[1.0\:\frac{\mbox{volt}}{\mbox{meter}}\right],
+ \]
+ which is wrong and whose resultant confusion compounds, wasting
+ hours of the undergraduates' time. It seems to me preferable to
+ insist that undergraduates learn from the first the correct meaning of
+ an unadorned equation like%
+ \[
+ E = \frac{Q}{Cd},
+ \]
+ where, say,
+ $Q=13.3\ \mbox{volt}\cdot\mbox{sec}/\mbox{ohm}$,
+ $C=0.470\ \mbox{sec}/\mbox{ohm}$, and
+ $d=5.35\ \mbox{cm}$; and that they grasp the need not to write
+ algebraically perplexing falsities such as that ``$d\ \mbox{cm} =
+ 5.35\ \mbox{cm}$''perplexing falsities which, unfortunately, the
+ textbook style in question inadvertently encourages them to write.
+
+ When during an engineering lecture it becomes pedagogically necessary
+ to associate units of measure to a symbolic equation, my own practice
+ at the blackboard has been to write
+ \[
+ E = \frac{Q}{Cd},\quad E\ :\ [\mbox{volts/meter}].
+ \]
+ Done sparingly, this seems to achieve the desired effect, though in
+ other instances the unadorned style is preferred.
+}
+
The word ``frequency'' without a qualifying adjective is usually taken
to mean cyclic frequency unless the surrounding context implies
otherwise.
+in English to mean cyclic frequency unless the surrounding context
+implies otherwise. Notwithstanding, interestingly, experience seems to
+find angular frequency to be oftener the more natural or convenient to
+use.
\index{frequency!spatial}
\index{spatial frequency}
+\index{wave number}
Frequencies exist in space as well as in time:
\bq{fours:085:15}
k\lambda = 2\pi.
\eq
Here,~$\lambda$ is a \emph{wavelength} measured in meters or other units
of length. The \emph{wavenumber}%
\footnote{
 The wavenumber~$k$ is no integer, notwithstanding that the letter~$k$
 tends to represent integers in other contexts.
+of length. The \emph{wave number}%
+\footnote{%
+ One could wish for a better name for the thing than \emph{wave
+ number.} By whatever name, the wave number~$k$ is no integer,
+ notwithstanding that the letter~$k$ tends to represent integers in
+ other contexts.%
}%
~$k$ is an angular spatial frequency measured in units like radians per
meter. (Oddly, no conventional symbol for cyclic spatial frequency
seems to be current. The literature just uses $k/2\pi$ which, in light
of the potential for confusion between~$\nu$ and~$\omega$ in the
temporal domain, is probably for the best.)
+meter; that is, $[\mbox{meter}]^{1}$. (Oddly, no conventional symbol
+for \emph{cyclic} spatial frequency seems to be current. The literature
+mostly just uses $k/2\pi$ which, in light of the potential for confusion
+between~$\nu$ and~$\omega$ in the temporal domain, is probably for the
+best.)
\index{propagation speed}
\index{speed!of propagation}
@@ 429,15 +558,15 @@ Where a wave propagates the propagation
relates periods and frequencies in space and time.
\index{dimensionlessness}
Now, we must admit that we fibbed when we said that~$T$ had to have
+Now, we must admit that we fibbed when we said (or implied) that~$T$ had to have
dimensions of time. Physically, that is the usual interpretation, but
mathematically~$T$ (and~$T_1$, $t$, $t_o$, $\tau$, etc.) can bear any
units and indeed are not required to bear units at all, as
\S~\ref{fours:080} has observed. The only mathematical requirement is
+mathematically~$T$ (and~$T_1$, $t$, $t_o$, $\tau$, etc.)\ can bear any
+units and indeed is not required to bear units at all, a fact to which
+\S~\ref{fours:080} has alluded. The only mathematical requirement is
that the product $\omega T = 2\pi$ (or $\Delta\omega\, T_1 = 2\pi$ or
the like, as appropriate) be dimensionless. However, when~$T$ has
dimensions of length rather than time it is conventionalindeed, it is
practically mandatory if one wishes to be understoodto change
+dimensions of length rather than of time it is conventional---indeed, it
+is practically mandatory if one wishes to be understood---to change
$\lambda \la T$ and $k \la \omega$ as this section has done, though the
essential Fourier mathematics is the same regardless of $T$'s dimensions
(if any) or of whether alternate symbols like~$\lambda$ and~$k$ are
@@ 445,42 +574,51 @@ used.
% 
\section{The square, triangular and Gaussian pulses}
+\section{Several useful pulses}
\label{fours:095}
\index{pulse}
+\index{pulse!useful}
\index{pulse!square}
\index{square pulse}
\index{pulse!triangular}
\index{triangular pulse}
+\index{pulse!raisedcosine}
+\index{raisedcosine pulse}
+\index{pulse!Gaussian}
+\index{Gaussian pulse}
\index{Dirac delta function!implementation of}
\index{delta function, Dirac!implementation of}
\index{pulse!Gaussian}
\index{Gaussian pulse}
\index{$\Pi$ as the rectangular pulse}
\index{$\Lambda$ as the triangular pulse}
+\index{$\Psi$ as the raisedcosine pulse}
\index{$\Omega$ as the Gaussian pulse}
The Dirac delta of \S~\ref{integ:670} and of Fig.~\ref{integ:670:figd}
is useful for the unit area it covers among other reasons, but for some
+The Dirac delta of \S~\ref{integ:670} and Fig.~\ref{integ:670:figd}
+is among other reasons useful for the unit area it covers, but for some
purposes its curve is too sharp. One occasionally finds it expedient to
substitute either the \emph{square} or the \emph{triangular pulse} of
Fig.~\ref{fours:095:fig1},
+substitute the \emph{square,} \emph{triangular} or \emph{raisedcosine
+pulse} of Fig.~\ref{fours:095:fig1},
\bq{fours:095:10}
 \settowidth\tlb{$1  \left t \right$}
+ \settowidth\tlb{$\left[1 + \cos (\pi t) \right]/2$}
\begin{split}
\Pi (t) &\equiv \begin{cases}
 \makebox[\tlb][l]{$1$} &\mbox{if $\left|t\right| \le 1/2$,} \\
+ \makebox[\tlb][l]{$1$} &\mbox{if $\left|t\right| < 1/2$,} \\
+ \makebox[\tlb][l]{$1/2$} &\mbox{if $\left|t\right| = 1/2$,} \\
0 &\mbox{otherwise;}
\end{cases} \\
\Lambda(t) &\equiv \begin{cases}
\makebox[\tlb][l]{$1 - \left| t \right|$} &\mbox{if $\left|t\right| \le 1$,} \\
0 &\mbox{otherwise;}
+ \end{cases} \\
+ \Psi(t) &\equiv \begin{cases}
+ \makebox[\tlb][l]{$\left[1 + \cos (\pi t) \right]/2$} &\mbox{if $\left|t\right| \le 1$,} \\
+ 0 &\mbox{otherwise;}
\end{cases}
\end{split}
\eq
for the Dirac delta,
\begin{figure}
 \caption{The square, triangular and Gaussian pulses.}
+ \caption{The square, triangular, raisedcosine and Gaussian pulses.}
\label{fours:095:fig1}
\bc
\nc\xxxab{4.3}
@@ 493,15 +631,16 @@ for the Dirac delta,
\nc\xxp{0.25}
\nc\xxq{1.90}
\nc\xxqqq{5.70}
+ \nc\xxxqq{9.50}
\nc\fxa{5.0} \nc\fxb{5.0}
 \nc\fya{5.6} \nc\fyb{4.5}
+ \nc\fya{9.4} \nc\fyb{4.5}
\begin{pspicture}(\fxa,\fya)(\fxb,\fyb)
%\psframe[linewidth=0.5pt,dimen=outer](\fxa,\fya)(\fxb,\fyb)
%\localscalebox{1.0}{1.0}
{
\small
\psset{dimen=middle}
 \settowidth\tlj{$\Pi$}
+ \settowidth\tlj{$\Psi$}
\rput(0, \xxq){%
{
\psset{linewidth=0.5pt}
@@ 554,6 +693,28 @@ for the Dirac delta,
\psline(\xxxab,0)(\xxxab,0)
\psline(0,\xxyac)(0,\xxyab)
\uput[r](\xxxab,0){$t$}
+ \uput[u](0,\xxyab){$\makebox[\tlj][r]{$\Psi$}(t)$}
+ %\psline(\xxl,\tlb)( \xxl,\tlb)
+ \uput[ur](0,\tlb){$1$}
+ % Raised cosine.
+ \psplot[plotpoints=300,linewidth=2.0pt]{-4.05}{4.05}{
+ /scale 1.2 def
+ /xs x scale div def
+ xs -1.0 le xs 1.0 ge or {0.0} {xs 180 mul cos 0.5 mul 0.5 add scale mul} ifelse
+ }
+ \psline( 1.0\tlb, \xxl)( 1.0\tlb,-\xxl)
+ \uput[d]( 1.0\tlb,-\xxl){\makebox[\tlc][r]{$1$}}
+ \psline(-1.0\tlb, \xxl)(-1.0\tlb,-\xxl)
+ \uput[d](-1.0\tlb,-\xxl){\makebox[\tlc][r]{$-1$}}
+ }%
+ }%
+ \rput(0,\xxxqq){%
+ {
+ \psset{linewidth=0.5pt}
+ \settowidth\tlc{$1$}
+ \psline(\xxxab,0)(\xxxab,0)
+ \psline(0,\xxyac)(0,\xxyab)
+ \uput[r](\xxxab,0){$t$}
\uput[u](0,\xxyab){$\makebox[\tlj][r]{$\Omega$}(t)$}
\uput[ur](0,0.39894\tlb){$\frac{1}{\sqrt{2\pi}}$}
}%
@@ 571,35 +732,39 @@ for the Dirac delta,
\end{pspicture}
\ec
\end{figure}
both of which pulses evidently share Dirac's property that
+each of which pulses evidently shares Dirac's property that
\bq{fours:095:20}
 \settowidth\tla{$\Pi$}
+ \settowidth\tla{$\Psi$}
\begin{split}
\int_{-\infty}^\infty \frac{1}{T}
\makebox[\tla][c]{$\delta $}\left(\frac{\tau - t_o}{T}\right) \,d\tau &= 1, \\
\int_{-\infty}^\infty \frac{1}{T}
\makebox[\tla][c]{$\Pi $}\left(\frac{\tau - t_o}{T}\right) \,d\tau &= 1, \\
\int_{-\infty}^\infty \frac{1}{T}
 \makebox[\tla][c]{$\Lambda$}\left(\frac{\tau - t_o}{T}\right) \,d\tau &= 1,
+ \makebox[\tla][c]{$\Lambda$}\left(\frac{\tau - t_o}{T}\right) \,d\tau &= 1, \\
+ \int_{-\infty}^\infty \frac{1}{T}
+ \makebox[\tla][c]{$\Psi $}\left(\frac{\tau - t_o}{T}\right) \,d\tau &= 1,
\end{split}
\eq
for any real $T>0$ and real~$t_o$. In the limit,
%it may be observed that
\bq{fours:095:30}
 \settowidth\tla{$\Pi$}
+ \settowidth\tla{$\Psi$}
\begin{split}
\lim_{T \ra 0^{+}} \frac{1}{T}
\makebox[\tla][c]{$\Pi $}\left(\frac{t - t_o}{T}\right)
&= \delta(t - t_o), \\
\lim_{T \ra 0^{+}} \frac{1}{T}
\makebox[\tla][c]{$\Lambda$}\left(\frac{t - t_o}{T}\right)
+ &= \delta(t - t_o), \\
+ \lim_{T \ra 0^{+}} \frac{1}{T}
+ \makebox[\tla][c]{$\Psi $}\left(\frac{t - t_o}{T}\right)
&= \delta(t - t_o),
\end{split}
\eq
constituting at least two possible implementations of the Dirac delta in
case such an implementation were needed. Looking ahead, if we may
+constituting at least three possible implementations of the Dirac delta
+in case such an implementation were needed. Looking ahead, if we may
further abuse the Greek capitals to let them represent pulses whose
shapes they accidentally resemble, then a third, subtler
+shapes they accidentally resemble, then a fourth, subtler
implementation---more complicated to handle but analytic
(\S~\ref{taylor:320}) and therefore preferable for some purposes---is
the \emph{Gaussian pulse}
@@ 612,11 +777,30 @@ the \emph{Gaussian pulse}
\eqa
the mathematics of which \S~\ref{fouri:130} and
% diagn: check the following reference.
Chs.~\ref{specf} and~\ref{prob}
will unfold.
+%chapters~\ref{specf} and~\ref{prob}
+chapter~\ref{prob}
+will begin to unfold.
%but see Fig.~\ref{prob:normdistfig} on page~\pageref{prob:normdistfig}
%for a plot.
+\index{signal!discretely sampled}
+The three nonanalytic pulses share a convenient property
+for all~$t$:
+\bq{fours:095:40}
+ \settowidth\tla{$\Psi$}
+ \begin{split}
+ \sum_{m=-\infty}^\infty \makebox[\tla][c]{$\Pi $}(t-m) &= 1; \\
+ \sum_{m=-\infty}^\infty \makebox[\tla][c]{$\Lambda$}(t-m) &= 1; \\
+ \sum_{m=-\infty}^\infty \makebox[\tla][c]{$\Psi $}(t-m) &= 1.
+ \end{split}
+\eq
+This property makes the three pulses especially useful in the rendering
+of discretely sampled signals.
+%The fourth pulse, $\Omega(t)$, lacks the property~(\ref{fours:095:40})
+%but is analytic (\S~\ref{taylor:320}), which the others are not.
+%\index{Archimedes (287212~B.C.)}%
+%\index{Cantor, Georg (18451918)}%
+
% 
\section[Expanding waveforms in Fourier series]%
@@ 627,7 +811,7 @@ will unfold.
\index{sinusoid}
\index{complex exponential}
\index{exponential!complex}
+\index{exponential, complex}
\index{Fourier coefficient}
\index{coefficient!Fourier}
The Fourier series represents a repeating waveform~(\ref{fours:000:11})
@@ 670,7 +854,7 @@ Equation~(\ref{fours:100:10}) has propos
waveform as a series of complex exponentials, each exponential of the
form $a_j e^{ij \,\Delta\omega\, t}$ in which~$a_j$ is a weight to be
determined. Unfortunately,~(\ref{fours:100:10}) can hardly be very
useful until the several~$a_j$ actually are determined, whereas how to
+useful until the several~$a_j$ are determined, whereas how to
determine~$a_j$ from~(\ref{fours:100:10}) for a given value of~$j$ is
not immediately obvious.
@@ 692,13 +876,7 @@ of~(\ref{fours:100:15}) then we should h
Accordingly, changing dummy variables $\tau \la t$ and $\ell \la j$
in~(\ref{fours:100:10}) and then substituting
into~(\ref{fours:100:15})'s right side the resulting expression for
$f(\tau)$, we have by successive steps that%
\footnote{\label{fours:100:fn10}%
 It is unfortunately conventional to footnote steps like these with
 some formal remarks on convergence and the swapping of
 summational/integrodifferential operators. Refer to
 \S\S~\ref{integ:240.10} and~\ref{integ:240.20}.
}
+$f(\tau)$, we have by successive steps that
\bqb
\lefteqn{\frac{1}{T_1} \int_{t_o-T_1/2}^{t_o+T_1/2}
e^{-ij\,\Delta\omega\,\tau} f(\tau) \,d\tau} &&
@@ 723,18 +901,19 @@ $f(\tau)$, we have by successive steps t
\eqb
in which Parseval's principle~(\ref{fours:080:10}) has killed all
but the $\ell=j$ term in the summation. Thus is~(\ref{fours:100:15})
formally proved.
+proved.
Though the foregoing formally completes the proof, the idea behind the
formality remains more interesting than the formality itself, for one
+Except maybe to the extent to which one would like to examine
+convergence (see the next paragraph), the idea behind the
+proof remains more interesting than the proof itself, for one
would like to know not only the fact that~(\ref{fours:100:15}) is true
but also the thought which leads one to propose the equation in the
first place. The thought is as follows. Assuming
that~(\ref{fours:100:10}) indeed can represent the waveform $f(t)$
+that~(\ref{fours:100:10}) can indeed represent the waveform $f(t)$
properly, one observes that the transforming factor
$e^{-ij\,\Delta\omega\,\tau}$ of~(\ref{fours:100:15}) serves to shift
the waveform's $j$th component $a_j e^{ij \,\Delta\omega\, t}$---whose
angular frequency evidently is $\omega = j \,\Delta\omega$---down to a
+angular frequency is evidently $\omega = j \,\Delta\omega$---down to a
frequency of zero, incidentally shifting the waveform's several other
components to various nonzero frequencies as well. Significantly, the
transforming factor leaves each shifted frequency to be a whole multiple
@@ 745,6 +924,45 @@ integrating the components over complete
zeroshifted component which, once shifted, has no cycle. Such is the
thought which has given rise to the equation.
+Before approving the proof's interchange of summation and integration, a
+pure mathematician would probably have preferred to establish
+conditions under which the summation and integration should each
+converge. To the applied mathematician however, the establishment of
+\emph{general} conditions turns out to be an unrewarding
+exercise,\footnote{%
+ \index{Dirichlet, J.~Peter Gustav Lejeune (1805--1859)}%
+ The conditions conventionally observed among professional
+ mathematicians seem to be known as the \emph{Dirichlet conditions.}
+ As far as this writer can tell, the Dirichlet conditions lie pretty
+ distant from applications---not that there aren't concrete
+ applications that transgress them (for example in stochastics), but
+ rather that the failure of~(\ref{fours:100:15}) to converge in a given
+ concrete application is more readily apparent by less abstract means
+ than Dirichlet's.
+
+ This book could merely \emph{list} the Dirichlet conditions without
+ proof; but, since the book is a book of derivations, it will decline
+ to do that.
+ The conditions look plausible. We'll leave it at that.
+
+ The writer suspects that few readers will ever encounter a concrete
+ application that really wants the Dirichlet conditions, but one never
+ knows. The interested reader can pursue Dirichlet elsewhere. (Where?
+ No recommendation. No book on the writer's shelf seems strong enough
+ on Dirichlet to recommend.)%
+}
+so we will let the matter pass with this remark: nothing prevents one
+from treating~(\ref{fours:100:10}) as
+\[
+ f(t) = \lim_{J\ra\infty}
+ \sum_{j=-(J-1)}^{J-1} a_j e^{ij \,\Delta\omega\, t},
+\]
+which manages the convergence problem (to the extent to which it even is
+a problem) in most cases of practical interest. Further work on the
+convergence problem is left to the charge of the concerned reader, but
+see also \S\S~\ref{integ:240.10}, \ref{integ:240.20}
+and~\ref{conclu:convergence}.
+
\subsection{The square wave}
\label{fours:100.20}
\index{square wave}
@@ 813,14 +1031,12 @@ Fig.~\ref{fours:000:fig20} have proposed
The square wave of \S~\ref{fours:100.20} is an important, canonical case
and~(\ref{fours:000:20}) is arguably worth memorizing. After the square
wave however the variety of possible repeating waveforms has no end.
Whenever an unfamiliar repeating waveform arises, one can calculate its
Fourier coefficients~(\ref{fours:100:15}) on the spot by the
straightforward routine of \S~\ref{fours:100.20}. There seems little
point therefore in trying to tabulate waveforms here.
+wave, however, an endless variety of repeating waveforms present
+themselves. Section~\ref{fours:100.20} has exampled how to compute
+their Fourier series.
\index{duty cycle}
One variant on the square wave nonetheless is interesting enough to
+One variant on the square wave is nonetheless interesting enough to
attract special attention. This variant is the \emph{pulse train} of
Fig.~\ref{fours:100:fig3},
\bq{fours:100:27}
@@ 875,9 +1091,6 @@ each cycle its pulse is as it were on du
\rput(0,0.1\tla){%
\psline(-0.1\tla,-\xxl)(-0.1\tla,\xxl)%
\psline( 0.1\tla,-\xxl)( 0.1\tla,\xxl)%
 %\rput(0.1\tla,0){\psline{>}(\xxma,0)(0,0)}%
 %\rput( 0.1\tla,0){\psline{>}( \xxma,0)(0,0)}%
 %\rput(0.00,0){\footnotesize$\eta T_1$}%
\psline{<->}(-0.1\tla,0)(0.1\tla,0)%
\rput( 0.1\tla,0){%
\psline(\xxma,0)(0,0)%
@@ 1041,7 +1254,7 @@ All this however supposes that the Fouri
\footnote{
The remainder of this dense subsection can be regarded as optional reading.
}
% The following bad break depends not only on the wording of the
+% diagn: The following bad break depends not only on the wording of the
% sentence but probably also on the index of the foregoing footnote.
% It is thus a chancy bad break, needing monitoring.
% bad break
@@ 1166,12 +1379,21 @@ admittedly invalidate the discretization
nothing prevents us from approximating the square wave's
discontinuity by an arbitrarily steep slope, whereupon this subsection's
conclusion again applies.%
\footnote{
+\footnote{%
Where this subsection's conclusion cannot be made to apply is where
unreasonable waveforms like $A\sin[B/\sin\omega t]$ come into play.
We will leave to the professional mathematician the classification of
such unreasonable waveforms, the investigation of the waveforms'
Fourier series and the provision of greater rigor generally.
+
+ One can object to the subsection's reliance on discretization, yet
+ discretization is a useful technique, and to the extent to which pure
+ mathematics has not yet recognized and formalized it, \emph{maybe} that
+ suggestsin the spirit of~\cite{Robinson}that some interested
+ professional mathematician has more work to do, whenever he gets
+ around to it. Or maybe it doesn't. Meanwhile, a lengthy, alternate,
+ more rigorous proof that does not appeal to discretization is found
+ in~\cite[chapter~3]{HFDavis}.%
}
The better, more subtle, more complete answer to the question though is
@@ 1672,9 +1894,13 @@ $j=0$, since there is no~$U_{1}$) and t
&
0 = S_0 < S_{2m} < S_{2m+2} < S_{\infty} < S_{2m+3} < S_{2m+1} < S_1
&\\&
 \mbox{for all $m>0$, $m \in \mathbb Z$.}
+ \mbox{for all $m>0$, $m \in \mathbb Z$;}
&
\eqb
+or in other words that
+\[
+ 0 = S_0 < S_2 < S_4 < S_6 < S_8 < \cdots < S_\infty < \cdots < S_9 < S_7 < S_5 < S_3 < S_1.
+\]
The foregoing applies only when $t \ge 0$ but naturally one can reason
similarly for $t \le 0$, concluding that the integral's global maximum
and minimum over the real domain occur respectively at the sineargument
@@ 1701,7 +1927,7 @@ will be the subject of the next subsecti
%but for the moment we will only observe that one often wants unusually
%clever techniques to discover constants like the~%
%$2\pi/4$ of~(\ref{fours:160:40}). Fortunately, some unusually clever
%mathematicians have toiled, in the words of G.S.~Brown, ``a very long
+%mathematicians have toiled, in the words of G.~S.\ Brown, ``a very long
%time, in very dark rooms,''%
%\footnote{\cite{Brownlecture}}
%over the past few centuries, precisely to solve problems like this one.
@@ 1966,8 +2192,7 @@ Fourier series oscillates and overshoots
\index{ringing}
Henry Wilbraham investigated this phenomenon as early as~1848\@.
% bad break
\linebreak
+\linebreak % bad break
J.~Willard Gibbs
explored its engineering implications in~1899.%
\footnote{%
@@ 1987,7 +2212,7 @@ significant physical effects.
\index{integration!as summation}
\index{summation!as integration}
Changing $t - T_1/4 \la t$ in~(\ref{fours:000:20}) to delay the square
wave by a quarter cycle yields
+wave by a quarter cycle yields that
\[
f(t) = \frac{8A}{2\pi}
\sum_{j=0}^{\infty} \frac{1}{2j+1}
@@ 2128,10 +2353,9 @@ giving it a steep but not vertical slope
a little;%
\footnote{
If the applied mathematician is especially exacting he might represent
 a discontinuity by the probability integral of
 % diagn
 [not yet written]
 or maybe (if slightly less exacting) as an arctangent,
+ a discontinuity by the cumulative normal distribution
+ function~(\ref{prob:100:40})
+ or maybe (if slightly less exacting) by an arctangent,
and indeed there are times at which he might do so. However,
such extrafine mathematical craftsmanship is unnecessary to this
section's purpose.
@@ 2148,4 +2372,58 @@ engineer or other applied mathematician
Gibbs' phenomenon and of the mathematics behind it for this reason.
% 
+% This section didn't quite work. Maybe some day.
+%
+%\section{Waveforms of limited domain}
+%\label{fours:180}
+%
+%A waveform of limited domain can be modeled as a repeating waveform. If
+%it can be modeled as a repeating waveform, then its Fourier series can
+%normally be expressed.
+%
+%\begin{figure}
+% \caption{Here.}
+% \label{fours:180fig1}
+% \bc
+% \nc\fxa{5.0} \nc\fxb{5.0}
+% \nc\fya{3.0} \nc\fyb{3.0}
+% \begin{pspicture}(\fxa,\fya)(\fxb,\fyb)
+% %\psframe[linewidth=0.5pt,dimen=outer](\fxa,\fya)(\fxb,\fyb)
+% %\localscalebox{1.0}{1.0}
+% {%
+% \small%
+% \psset{dimen=middle,linewidth=2.0pt}%
+% \nc\ttb{4.65}
+% \nc\ttc{1.5}
+% \nc\ttd{1.63636363636364}
+% {%
+% \psset{linewidth=0.5pt}%
+% \psline(\ttb,0)(\ttb,0)%
+% \uput[r](\ttb,0){$t$}%
+% }%
+% {%
+% \psline(\ttd,\ttc)(\ttd,\ttc)%
+% \psline( \ttd,\ttc)( \ttd,\ttc)%
+% }%
+% \nc\tta{4.5}
+% \psplot[plotpoints=500]{\ttd}{\ttd}{%
+% /twopi 6.28318530717959 def
+% /domega 110.0 def
+% /ampl 0.40 def
+% %x domega mul cos 1.0 mul ampl mul 0.87833 mul
+% x domega mul sin 1.0 mul ampl mul 0.42645 mul
+% %x domega mul 2.0 mul cos 1.0 mul ampl mul 1.36897 mul 2.0 div
+% x domega mul 2.0 mul sin 1.0 mul ampl mul 0.00542 mul 2.0 div
+% %x domega mul 3.0 mul cos 1.0 mul ampl mul 1.00534 mul 3.0 div
+% x domega mul 3.0 mul sin 1.0 mul ampl mul 0.58394 mul 3.0 div
+% %x domega mul 4.0 mul cos 1.0 mul ampl mul 0.29542 mul 4.0 div
+% x domega mul 4.0 mul sin 1.0 mul ampl mul 0.55244 mul 4.0 div
+% %x domega mul 5.0 mul cos 1.0 mul ampl mul 0.92348 mul 5.0 div
+% x domega mul 5.0 mul sin 1.0 mul ampl mul 0.73342 mul 5.0 div
+% add add add add %add add add add add
+% }%
+% }
+% \end{pspicture}
+% \ec
+%\end{figure}
diff pruN 0.53.201204142/tex/gjrank.tex 0.56.20180123.12/tex/gjrank.tex
 0.53.201204142/tex/gjrank.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/gjrank.tex 20180116 13:33:43.000000000 +0000
@@ 42,7 +42,7 @@ Except in \S~\ref{gjrank:337}, the chapt
likes in such a book as this. However, it is hard to see how to avoid
the rigor here, and logically the chapter cannot be omitted. We will
drive through the chapter in as few pages as can be managed, and then
onward to the more interesting matrix topics of Chs.~\ref{mtxinv}
+onward to the more interesting matrix topics of chapters~\ref{mtxinv}
and~\ref{eigen}.
% 
@@ 200,7 +200,7 @@ Table~\ref{gjrank:337:table} obtain.
\caption{Some elementary similarity transformations.}
\label{gjrank:337:table}
\settowidth\tla{${U^{\{k\}}}'$}
 \nc\xx{$L,U,L^{[k]},U^{[k]},L^{\{k\}},U^{\{k\}},L_\|^{\{k\}},U_\|^{\{k\}}$}
+ \nc\xx{$L,U,L_{[k]},U_{[k]},L^{[k]},U^{[k]},L^{\{k\}},U^{\{k\}},L_\|^{\{k\}},U_\|^{\{k\}}$}
\settowidth\tlb{\xx}
\bqb
T_{[i \lra j]} I T_{[i \lra j]} &=& I \\
@@ 242,16 +242,19 @@ Most of the table's rules are fairly obv
understood, though to grasp some of the rules it helps to sketch the relevant
matrices on a sheet of paper. Of course rigorous symbolic proofs can be constructed
after the pattern of \S~\ref{matrix:330.30}, but they reveal little or nothing
sketching the matrices does not. The symbols~$P$, $D$, $L$ and~$U$ of course
+sketching the matrices does not. In Table~\ref{gjrank:337:table} as
+elsewhere, the symbols~$P$, $D$, $L$ and~$U$
represent the quasielementaries and unit triangular matrices of
\S\S~\ref{matrix:325} and~\ref{matrix:330}. The symbols~$P'$, $D'$, $L'$
and~$U'$ also represent quasielementaries and unit triangular matrices, only not
necessarily the same ones~$P$, $D$, $L$ and~$U$ do.
The rules permit one to commute some but not all elementaries past a
quasielementary or unit triangular matrix, without fundamentally altering the
character of the quasielementary or unit triangular matrix, and sometimes
without altering the matrix at all. The rules find use among other
+The rules of Table~\ref{gjrank:337:table}
+permit one to commute some but not all elementaries past a
+quasielementary operator or unit triangular matrix
+without fundamentally altering the character of the quasielementary
+operator or unit triangular matrix, and sometimes
+without changing it at all. The rules find use among other
places in the GaussJordan decomposition of \S~\ref{gjrank:341}.
% 
@@ 314,7 +317,8 @@ where
The Gauss-Jordan decomposition is also called the \emph{Gauss-Jordan
factorization.}
Whether all possible matrices~$A$ have a GaussJordan decomposition
+Whether all possible dimension-limited, $m\times n$ matrices~$A$
+have a Gauss-Jordan decomposition
(they do, in fact) is a matter this section addresses. Howeverat
least for matrices which do have onebecause~$G_>$ and~$G_<$ are
composed of invertible factors, one can leftmultiply the equation $A =
@@ 345,21 +349,27 @@ has both significant theoretical propert
applications, and in any case needs less advanced preparation to
appreciate than the others, and (at least as developed in this book)
precedes the others logically. It emerges naturally when one posits a
pair of square, $n \times n$ matrices,~$A$ and~$A^{-1}$, for which
+pair of dimension-limited, square,
+$n \times n$ matrices,~$A$ and~$A^{-1}$, for which
$A^{-1} A = I_n$, where~$A$ is known and~$A^{-1}$ is to be determined.
(The~$A^{-1}$ here is the $A^{-1(n)}$ of eqn.~\ref{matrix:321:20}.
+[The~$A^{-1}$ here is the $A^{-1(n)}$ of eqn.~\ref{matrix:321:20}.
However, it is only supposed here that $A^{-1} A = I_n$;
it is not \emph{yet} claimed that $A A^{-1} = I_n$.)
+it is not \emph{yet} claimed that $A A^{-1} = I_n$. ``Square'' means
+that the matrix has an $n\times n$ active region rather than an
+$m\times n$, $m\neq n$, where ``active region'' is defined as in
+\S~\ref{matrix:180.25}.]
To determine~$A^{-1}$ is not an entirely trivial problem. The
matrix~$A^{-1}$ such that $A^{-1} A = I_n$ may or may not exist (usually
it does exist if~$A$ is square, but even then it may not, as
we shall soon see), and even if it does exist,
how to determine it is not immediately obvious. And still, if one can
determine~$A^{-1}$, that is only for square~$A$; what if~$A$ is not
+determine~$A^{-1}$, that is only for square~$A$; what if~$A$, having an
+$m\times n$, $m\neq n$, active region, were not
square? In the present subsection however we are not trying to prove
anything, only to motivate, so for the moment let us suppose a
square~$A$ for which~$A^{-1}$ does exist, and let us seek~$A^{-1}$ by
+anything, only to motivate, so for the moment let us suppose an~$A$ for
+which~$A^{-1}$ does exist, let us confine our attention to square~$A$,
+and let us seek~$A^{-1}$ by
leftmultiplying~$A$ by a sequence $\prod T$ of elementary row
operators, each of which makes the matrix more nearly resemble~$I_n$.
When~$I_n$ is finally achieved, then we shall have that
@@ 490,7 +500,8 @@ need or want, particularly to avoid divi
a zero in an inconvenient cell of the matrix (the reader might try
reducing $A=[0\;1; 1\;0]$ to~$I_2$, for instance; a row or column
interchange is needed here). Regarding~$K$, this factor comes into
play when~$A$ has broad rectangular rather than square shape, and also
+play when~$A$ has broad rectangular ($m$ and~$B_<$ exist
such that
+Let the symbols~$B_>$ and~$B_<$ respectively represent the
+aforementioned reversible row and column operations:
+\bq{gjrank:340:18}
+ \begin{split}
+ B_>^{-1}B_> &= I = B_>B_>^{-1}; \\
+ B_<^{1}B_< &= I = B_$
+and~$B_<$ exist such that
\bq{gjrank:340:20}
\begin{split}
B_>AB_< &= I_r, \\
 A &= B_>^{-1} I_r B_<^{-1}, \\
 B_>^{-1}B_> &= I = B_>B_>^{-1}, \\
 B_<^{1}B_< &= I = B_^{1} I_r B_<^{1}.
\end{split}
\eq
The question is whether in~(\ref{gjrank:340:20}) only a single rank~$r$
@@ 2043,11 +2094,20 @@ This finding has two immediate implicati
\item
Reversible row and/or column operations exist to change any matrix
of rank~$r$ to \emph{any other matrix} of the same rank. The reason is
 that, according to~(\ref{gjrank:340:20}), reversible operations
+ that, according to~(\ref{gjrank:340:20}), (\ref{gjrank:341:GJ})
+ and~(\ref{gjrank:341:GJinv}), reversible operations
exist to change both matrices to~$I_r$ and back.
\item
No reversible operation can change a matrix's rank.
\ei
+The finding further suggests a conjecture:
+\bi
+ \item
+ The matrices~$A$, $A^T$ and~$A^{*}$ share the same rank~$r$.
+\ei
+The conjecture is proved by using~(\ref{matrix:120:30})
+or~(\ref{matrix:120:33}) to compute the transpose or adjoint
+of~(\ref{gjrank:340:20}).
The discovery that every matrix has a single, unambiguous rank and the
establishment of a fail-proof algorithm---the Gauss-Jordan---to ascertain
@@ 2056,8 +2116,7 @@ achievements nonetheless, worth the effo
achievements matter is that the mere dimensionality of a matrix is a
chimerical measure of the matrix's true sizeas for instance for the
$3 \times 3$ example matrix at the head of the section. Matrix rank by
contrast is an entirely solid, dependable measure. We will rely on it
often.
+contrast is a solid, dependable measure. We will rely on it often.
Section~\ref{gjrank:340.30} comments further.
@@ 2069,11 +2128,13 @@ Section~\ref{gjrank:340.30} comments fur
\index{degenerate matrix}
\index{matrix!degenerate}
According to~(\ref{gjrank:341:22}), the rank~$r$ of a matrix can exceed
+According to~(\ref{gjrank:341:22}), the rank~$r$ of a dimension-limited
+matrix (\S~\ref{matrix:180.25})---let us refer to it as a \emph{matrix}
+(just to reduce excess verbiage)---can exceed
the number neither of the matrix's rows nor of its columns. The
greatest rank possible for an $m\times n$ matrix is the lesser of~$m$
and~$n$. A \emph{fullrank} matrix, then, is defined to be an $m\times
n$ matrix with maximum rank $r=m$ or $r=n$or, if $m=n$, both. A
+n$ matrix with rank $r=m$ or $r=n$or, if $m=n$, both. A
matrix of less than full rank is a \emph{degenerate} matrix.
\index{linear combination}
@@ 2086,7 +2147,7 @@ column out. Having zeroed the dependent
interchange it over to the matrix's extreme right, effectively throwing
the column away, shrinking the matrix to $m\times(n1)$ dimensionality.
Shrinking the matrix necessarily also shrinks the bound on the matrix's
rank to $r\le n1$which is to say, to $r^{T}$ such that $I_n = B_<^{T}A^
which equation is $B_>AB_< = I_n$which in turn says that not
only~$A^T$, but also~$A$ itself, has full rank $r=n$.
Parallel reasoning rules the rows of broad matrices, $m\le n$, of
+Parallel reasoning rules the rows and columns of broad matrices, $m\le n$, of
course. To square matrices, $m=n$, both lines of reasoning apply.
\index{matrix!tall}
@@ 2141,6 +2202,12 @@ rank $r=m \le n$. Only a square matrix
full row rank at the same time, because a tall or broad matrix cannot
but include, respectively, more columns or more rows than~$I_r$.
+\index{rank!infinite}
+\index{infinite rank}
+Observe incidentally that extended operators, which per
+\S~\ref{matrix:180.35} define their $m\times n$ active regions
+differently, have infinite rank.
+
\subsection[Under and overdetermined systems (introduction)]
{Underdetermined and overdetermined linear systems (introduction)}
\label{gjrank:340.24}
@@ 2174,7 +2241,7 @@ is paradoxically both underdetermined an
Section~\ref{mtxinv:230} solves the exactly determined linear system.
Section~\ref{mtxinv:240} solves the nonoverdetermined linear system.
Section~\ref{mtxinv:320} analyzes the unsolvable overdetermined linear
system among others. Further generalities await Ch.~\ref{mtxinv}; but, regarding
+system among others. Further generalities await chapter~\ref{mtxinv}; but, regarding
the overdetermined system specifically, the present subsection would
observe at least the few following facts.
@@ 2327,15 +2394,21 @@ to.
\index{vector space!address of}
\index{space!address of}
Since $PDLU$ acts as a row operator,~(\ref{gjrank:340:60}) implies that
each row of the fullrank matrix~$A$ lies in the space the rows of~$I_n$
address. This is obvious and boring, but interesting is the converse
implication of~(\ref{gjrank:340:60})'s complementary form,
+each row of the square, $n\times n$ matrix~$A$ whose rank $r=n$ is full
+lies in the space the rows of~$I_n$ address.
+% A later review rediscovers the phrase on the next line,
+% which seems like pretty good prose.
+% I should try to write like that more.
+This is obvious and boring,
+but interesting is the converse implication
+of~(\ref{gjrank:340:60})'s complementary form,
\[
U^{-1} L^{-1} D^{-1} P^{-1} A = I_n,
\]
that each row of~$I_n$ lies in the space the rows of~$A$ address. The
rows of~$I_n$ and the rows of~$A$ evidently address the same space.
One can moreover say the same of~$A$'s columns, since $B=A^T$ has full
+One can moreover say the same of~$A$'s columns since, according to
+\S~\ref{gjrank:340.20}, $A^T$ has full
rank just as~$A$ does. In the whole, \emph{if a matrix~$A$ is square
and has full rank $r=n$, then $A$'s columns together, $A$'s rows
together, $I_n$'s columns together and $I_n$'s rows together each
@@ 2413,14 +2486,18 @@ Now, admittedly, adjectives like ``hones
``imposter,'' are a bit hyperbolic. The last paragraph has used them to
convey the subjective sense of the matter, but of course there is
nothing mathematically improper or illegal about a matrix of less than
full rank, so long as the true rank is correctly recognized. When one
+full rank so long as the true rank is correctly recognized. When one
models a physical phenomenon by a set of equations, one sometimes is
dismayed to discover that one of the equations, thought to be
independent, is really just a useless combination of the others. This
can happen in matrix work, too. The rank of a matrix helps one to
recognize how many truly independent vectors, dimensions or equations
one actually has available to work with, rather than how many seem
available at first glance. That is the sense of matrix rank.
+available at first glance.
+% In an earlier draft, the next line read, "of a matrix's rank," which
+% may have been more grammatical; but the prose reads better as now
+% written.
+Such is the sense of matrix rank.
%The reader was promised a tedious chapter, and it seems fair to say that
%the promise has been kept. The matrix is an odd animal. At first
@@ 2431,6 +2508,6 @@ available at first glance. That is the
%would expect, but an unexpectedly thick theoretical bramble, whose
%untangling demands such a tedious chapter as this. The uniqueness of
%matrix rank is the fruit of the bramble, worth the harvest, as we shall
%taste in Chs.~\ref{mtxinv} and~\ref{eigen}. Those chapters begin
+%taste in chapters~\ref{mtxinv} and~\ref{eigen}. Those chapters begin
%to harness the matrix to more interesting work.
diff pruN 0.53.201204142/tex/greek.tex 0.56.20180123.12/tex/greek.tex
 0.53.201204142/tex/greek.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/greek.tex 20180113 15:31:46.000000000 +0000
@@ 4,7 +4,7 @@
\index{Roman alphabet}
Mathematical experience finds the Roman alphabet to lack sufficient
symbols to write higher mathematics clearly. Although not completely
+symbols to write higher mathematics fluently. Though not completely
solving the problem, the addition of the Greek alphabet helps. See
Table~\ref{greek:288:greek}.%
\begin{table}
@@ 38,9 +38,9 @@ Table~\ref{greek:288:greek}.%
\ec
\end{table}
When first seen in mathematical writing, the Greek letters take on a
wise, mysterious aura. Well, the aura is finethe Greek letters are
prettybut don't let the Greek letters throw you. They're just
+When first seen in English-language mathematical writing,
+the Greek letters can seem to take on a
+wise, mysterious aura. Nevertheless, the Greek letters are just
letters. We use them not because we want to be wise and mysterious%
\footnote{
Well, you can use them to be wise and mysterious if you want to. It's
@@ 50,11 +50,11 @@ letters. We use them not because we wan
conventional ways: Greek minuscules (lowercase letters) for angles;
Roman capitals for matrices;~$e$ for the natural logarithmic base;~$f$
and~$g$ for unspecified functions;~$i$, $j$, $k$, $m$, $n$, $M$
 and~$N$ for integers;~$P$ and~$Q$ for metasyntactic elements;
+ and~$N$ for integers;~$P$ and~$Q$ for logical propositions and
+ metasyntactic elements;
%(the mathematical equivalents of \texttt{foo} and \texttt{bar});
 $t$, $T$
 and~$\tau$ for time;~$d$, $\delta$ and~$\Delta$ for change;~$A$, $B$
 and~$C$ for unknown coefficients; etc.
+ $t$, $T$ and~$\tau$ for time;~$d$, $\delta$ and~$\Delta$ for
+ change;~$A$, $B$ and~$C$ for indeterminate coefficients; etc.
}
but rather because we simply do not have enough Roman letters. An
equation like
@@ 113,5 +113,7 @@ which looks just like the Roman capital~
entirely proper member of the Greek alphabet. The Greek
minuscule~$\upsilon$ (upsilon) is avoided for like reason, for
mathematical symbols are useful only insofar as we can visually tell
them apart.
+them apart. Interestingly, however, the Greek minuscules~$\nu$ (nu)
+and~$\omega$ (omega) are often used in applied mathematics, so one needs
+to learn to distinguish those ones from the Roman~$v$ and~$w$.
diff pruN 0.53.201204142/tex/hex.tex 0.56.20180123.12/tex/hex.tex
 0.53.201204142/tex/hex.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/hex.tex 20180123 23:02:28.000000000 +0000
@@ 8,26 +8,31 @@ The importance of conventional mathemati
overstate. Such notation serves two distinct purposes: it conveys
mathematical ideas from writer to reader; and it concisely summarizes
complex ideas on paper to the writer himself. Without the notation, one
would find it difficult even to think clearly about the math; to discuss
it with others, nearly impossible.
+would find it difficult even to think clearly about the mathematics; to
+discuss it with others, nearly impossible.
The right notation is not always found at hand, of course. New
mathematical ideas occasionally find no adequate pre\"established
notation, when it falls to the discoverer and his colleagues to
establish new notation to meet the need. A more difficult problem
arises when old notation exists but is inelegant in modern use.

Convention is a hard hill to climb, and rightly so. Nevertheless, slavish
devotion to convention does not serve the literature well; for how else
can notation improve over time, if writers will not incrementally
improve it? Consider the notation of the algebraist Girolamo
Cardano in his 1539 letter to Tartaglia:
+The right notation is not always conveniently found at hand, of course.
+The discoverer of a new mathematical idea may require new notation to
+represent it. Even the user of an old mathematical idea may find that
+its old notation no longer suits.
+
+Regarding the last problem: old notation is conventional, and
+convention isand ought to bea steep hill to climb.
+Indeed, mathematics already enjoys excellent conventional notation, worn
+like an old boot to fit the collected experience of the centuries.
+% diagn: the next sentence, revised, wants another reading
+Nevertheless, slavish devotion to convention does not serve
+the mathematical literature well; for how else shall notation be made
+to better fit the coming experience future mathematicians may gain, if
+writers will not incrementally improve the notation today?
+Consider the notation of the algebraist Girolamo Cardan in his 1539
+letter to Tartaglia:
\begin{quote}
[T]he cube of onethird of the coefficient of the unknown is
greater in value than the square of onehalf of the
number.~\cite{mathbios}
\end{quote}
If Cardano lived today, surely he would express the same thought in the
+If Cardan lived today, surely he would express the same thought in the
form
\[
\left(\frac{a}{3}\right)^3 > \left(\frac{x}{2}\right)^2.
@@ 36,21 +41,26 @@ Good notation matters.
\index{$\pi$}
\index{$2\pi$}
Although this book has no brief to overhaul applied mathematical
notation generally, it does seek to aid the honorable cause of
notational evolution in a few specifics. For example, the book
+Although this book has neither brief nor wish to overhaul applied
+mathematical notation generally, the book does seek to aid the
+honorable cause of notational evolution in a few specifics. For
+example, the book
sometimes treats~$2\pi$ implicitly as a single symbol, so that (for
instance) the quarter revolution or right angle is expressed as $2\pi/4$
rather than as the less evocative $\pi/2$.
As a single symbol, of course,~$2\pi$ remains a bit awkward. One wants
to introduce some new symbol $\xi=2\pi$ thereto. However, it is
neither necessary nor practical nor desirable to leap straight to
notational Utopia in one great bound. It suffices in print to improve
the notation incrementally. If this book treats~$2\pi$ sometimes as a
single symbolif such treatment meets the approval of slowly evolving
conventionthen further steps, the introduction of new symbols~$\xi$
and such, can safely be left incrementally to future writers.
+to introduce some new symbol%
+\footnote{%
+ See~\cite{Palais:2001} and~\cite{Bartholomew}. There also exists
+ online a spirited ``Tau manifesto'' of which you may have heard,
+ though your author does not feel quite risqu\'e enough to
+ \emph{formally} cite a ``manifesto'' as a mathematical source!%
+}
+$\xi=2\pi$, $\tau=2\pi$ or even $\palais=2\pi$. However, having
+already stretched to reach the more substantive \S~\ref{hex:240.1},
+caution here prevails. In this book, the style~$2\pi$ is
+retained, even if the author would prefer it to fall obsolete.%
% 
@@ 59,14 +69,15 @@ and such, can safely be left incremental
\index{hexadecimal}
\index{$\mbox{0x}$}
Treating~$2\pi$ as a single symbol is a small step, unlikely to trouble
readers much. A bolder step is to adopt from the computer science
literature the important notational improvement of the hexadecimal
numeral. No incremental step is possible here; either we leap the ditch
or we remain on the wrong side. In this book, we choose to leap.
+Treating~$2\pi$ sometimes as a single symbol is a small step, unlikely
+to trouble readers much. A bolder step is to adopt from the
+computerscience literature the important notational improvement of the
+hexadecimal numeral. No incremental step is possible here; either we
+leap the ditch or we remain on the wrong side. In this book, we choose
+to leap.
Traditional decimal notation is unobjectionable for measured quantities
like 63.7~miles, $\$\:1.32$ million or $9.81\:\mr{m}/\mr{s}^2$, but its
+Traditional decimal notation seems unobjectionable for measured quantities
+like 63.7~miles, $\$1.32$ million or $9.81\:\mr{m}/\mr{s}^2$, but its
iterative tenfold structure meets little or no aesthetic support in
mathematical theory. Consider for instance the decimal numeral 127,
whose number suggests a significant idea to the computer scientist, but
@@ 75,11 +86,27 @@ signed integer storable in a byte. Much
hexadecimal notation 0x7F, which clearly expresses the idea of $2^71$.
To the reader who is not a computer scientist, the aesthetic advantage
may not seem immediately clear from the one example, but consider the
decimal number 2,147,483,647, which is the largest signed integer
+decimal numeral 2,147,483,647, which represents the largest signed integer
storable in a standard thirtytwo bit word. In hexadecimal notation,
this is $\mr{0x7FFF\,FFFF}$, or in other words $2^{\mr{0x1F}}1$. The
question is: which notation more clearly captures the idea?
+By contrast, decimal notation like 499,999 does not really convey any
+interesting \emph{mathematical} idea at all, except with regard to a
+special focus on tensa focus which is of immense practical use but
+which otherwise tells one very little about numbers, as numbers.
+Indeed, one might go so far as to say that the notation 499,999 were
+misleading, insofar as it attaches mathematically false interest to the
+idea it represents.
+
+Now, one does not wish to sell the hexadecimal numeral too hard.
+Decimal numerals are fine: the author uses them as often, and likes
+them as well, as almost anyone does. Nevertheless, the author had a
+choice when writing this book, and for \emph{this} book the hexadecimal
+numeral seemed the proper, conceptually elegant choiceproper and
+conceptually elegant enough indeed to risk deviating this far from
+conventionso that is the numeral he chose.
+
To readers unfamiliar with the hexadecimal notation, to explain very
briefly: hexadecimal represents numbers not in tens but rather in
sixteens. The rightmost place in a hexadecimal numeral represents ones;
@@ 87,18 +114,22 @@ the next place leftward, sixteens; the n
squared; the next, sixteens cubed, and so on. For instance, the
hexadecimal numeral 0x1357 means ``seven, plus five times sixteen, plus
thrice sixteen times sixteen, plus once sixteen times sixteen times
sixteen.'' In hexadecimal, the sixteen symbols 0123456789ABCDEF
respectively represent the numbers zero through fifteen, with sixteen
being written 0x10.
+sixteen'' (all of which totals to 4951 in decimal). In hexadecimal, the
+sixteen symbols 0123456789ABCDEF respectively represent the numbers zero
+through fifteen, with sixteen being written 0x10.
All this raises the sensible question: why sixteen?%
\footnote{
 An alternative advocated by some eighteenthcentury writers was twelve.
 In base twelve, one quarter, one third and one half are respectively
 written 0.3, 0.4 and 0.6. Also, the hour angles
+ An alternative~\cite[book~6, no.~83]{LaplaceOuvres}
+ advocated by some nineteenthcentury
+ writers was twelve. (Laplace, cited, was not indeed one of the
+ advocates, or at any rate was not a strong advocate; however, his
+ context appears to have lain in the promotion of base twelve by
+ contemporaries.) In base twelve, one quarter, one third and one half
+ are respectively written 0.3, 0.4 and 0.6. Also, the hour angles
(\S~\ref{trig:260}) come in neat increments of $(0.06)(2\pi)$ in base
twelve, so there are some real advantages to that base. Hexadecimal,
 however, besides having momentum from the computer science literature,
+ however, besides having momentum from the computerscience literature,
is preferred for its straightforward proxy of binary.
}
The answer is that sixteen is~$2^4$, so hexadecimal (base sixteen) is
@@ 106,36 +137,49 @@ found to offer a convenient shorthand fo
fundamental, smallest possible base). Each of the sixteen hexadecimal
digits represents a unique sequence of exactly four bits (binary
digits). Binary is inherently theoretically interesting, but direct
binary notation is unwieldy (the hexadecimal number 0x1357 is binary
+binary notation is unwieldy (the hexadecimal numeral 0x1357 is binary
$\mr{0001\,0011\,0101\,0111}$), so hexadecimal is written in proxy.
The conventional hexadecimal notation is admittedly a bit bulky and
unfortunately overloads the letters~A through~F, letters which when set
in italics usually represent coefficients not digits. However, the real
problem with the hexadecimal notation is not in the notation itself but
+Admittedly, the hexadecimal~``0x'' notation is bulky and overloads the
+letters~A through~F (letters which otherwise conventionally often
+represent matrices or indeterminate coefficients). However, the greater
+trouble with the hexadecimal notation is not in the notation itself but
rather in the unfamiliarity with it. The reason it is unfamiliar is
that it is not often encountered outside the computer science
+that it is not often encountered outside the computerscience
literature, but it is not encountered because it is not used, and it is
not used because it is not familiar, and so on in a cycle. It seems
to this writer, on aesthetic grounds, that this particular cycle is
worth breaking, so this book uses the hexadecimal for integers larger
than~9. If you have never yet used the hexadecimal system, it is worth
your while to learn it. For the sake of elegance, at the risk of
challenging entrenched convention, this book employs hexadecimal
throughout.

Observe that in some cases, such as where hexadecimal numbers are
arrayed in matrices, this book may omit the cumbersome hexadecimal
prefix~``0x.'' Specific numbers with physical units attached
appear seldom in this book, but where they do naturally decimal not
hexadecimal is used: $v_\mr{sound}=331\:\mr{m/s}$ rather than the
sillylooking $v_\mr{sound}=\mr{0x14B}\:\mr{m/s}$.
+worth breaking, so the book you are reading employs the hexadecimal system
+for integers larger than~9. If you have never yet used the hexadecimal
+system, it is worth your while to learn it. For the sake of conceptual
+elegance, at the risk of transgressing entrenched convention, this book
+employs hexadecimal throughout.
+
+The book occasionally omits the cumbersome hexadecimal prefix~``0x,''
+as for example when it arrays hexadecimal numerals in matrices (as in
+\S~\ref{gjrank:341.01} where~A is ten but, unfortunately potentially
+confusingly,~$A$, set in italics, is a matrix; and as in
+Fig.~\ref{drvtv:pasc}).
+% diagn: check that the references of the above paragraph are not later lost!
+
+\index{measure, unit of}
+\index{unit!of measure}
+\index{physical unit}
+\index{unit!physical}
+The book seldom mentions numbers with physical units of measure
+attached but, when it does, it expresses those in decimal rather than
+hexadecimal no\ta\tionfor example, $v_\mr{sound}=331\:\mr{m/s}$
+rather than $v_\mr{sound}=\mr{0x14B}\:\mr{m/s}$.
Combining the hexadecimal and~$2\pi$ ideas, we note here
for interest's sake that
\[
2\pi \approx \mr{0x6.487F}.
\]
+%(The author does not know whether or where, deep in the hexadecimal
+%digits of~$2\pi$, a sequence like $55555555$ appears but, if and where
+%it did, it would strike him as potentially rather more interesting than
+%some sequence $55555555$ deep in the \emph{decimal} digits.)
% 
@@ 162,7 +206,7 @@ interesting parts of an equation are not
and a haze of redundant little letters, strokes and squiggles, on the
one hand; and on the other hand showing enough detail that the reader
who opens the book directly to the page has a fair chance to understand
what is written there without studying the whole book carefully up to
+what is printed there without studying the whole book carefully up to
that point. Where appropriate, this book often condenses notation and
omits redundant symbols.
diff pruN 0.53.201204142/tex/hist.tex 0.56.20180123.12/tex/hist.tex
 0.53.201204142/tex/hist.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/hist.tex 20180123 23:44:59.000000000 +0000
@@ 5,20 +5,21 @@
\index{Black, Thaddeus~H.\ (1967)}
The book in its present form is based on various unpublished drafts and
notes of mine, plus some of my wife Kristie's (n\'ee Hancock), going
+notes of mine, plus a few of my wife Kristie's (n\'ee Hancock), going
back to 1983 when I was fifteen years of age. What prompted the contest
I can no longer remember, but the notes began one day when I challenged
a highschool classmate to prove the quadratic formula. The classmate
responded that he didn't need to prove the quadratic formula because the
proof was in the class math textbook, then counterchallenged me to prove
+proof was in the class' math textbook, then counterchallenged me to prove
the Pythagorean theorem. Admittedly obnoxious (I was fifteen, after
all) but not to be outdone, I whipped out a pencil and paper on the spot
and started working. But I found that I could not prove the theorem
that day.
The next day I did find a proof in the school library,%
+The next day I did find a proof in the school's library,%
\footnote{
 A better proof is found in \S~\ref{alggeo:223}.
+ A better proof is found in \S~\ref{alggeo:223} and the introduction to
+ chapter~\ref{intro}.
}
writing it down, adding to it the proof of the quadratic formula plus
a rather inefficient proof of my own invention to the law of cosines.
@@ 51,17 +52,17 @@ mysterious to me. When later in high sc
Taylor series to calculate trigonometrics, into my growing collection of
proofs the series went.
Five years after the Pythagorean incident I was serving the U.S. Army
+Five years after the Pythagorean incident I was serving the U.S.\ Army
as an enlisted troop in the former West Germany. Although those were
the last days of the Cold War, there was no shooting war at the time, so
the duty was peacetime duty. My duty was in military signal
intelligence, frequently in the middle of the German night when there
often wasn't much to do. The platoon sergeant wisely condoned neither
novels nor cards on duty, but he did let the troops read the newspaper
after midnight when things were quiet enough. Sometimes I used the time
to study my Germanthe platoon sergeant allowed this, toobut I
owned a copy of Richard~P. Feynman's \emph{Lectures on
Physics}~\cite{Feynman} which I would sometimes read instead.
+novels nor playing cards on duty, but he did let the troops read the
+newspaper after midnight when things were quiet enough. Sometimes I
+used the time to study my Germanthe platoon sergeant allowed this,
+toobut I owned a copy of Richard~P.\ Feynman's \emph{Lectures on
+Physics}~\cite{Feynman} which sometimes I would read instead.
Late one night the battalion commander, a lieutenant colonel and West
Point graduate, inspected my platoon's duty post by surprise. A
@@ 75,7 +76,7 @@ unpolished, reading the \emph{Lectures.}
I snapped to attention. The colonel took a long look at my boots
without saying anything, as stormclouds gathered on the first sergeant's
brow at his left shoulder, then asked me what I had been reading.
+brow at his left shoulder, and then asked me what I had been reading.
``Feynman's \emph{Lectures on Physics,} sir.''
@@ 104,12 +105,11 @@ boots better polished.
In Volume I, Chapter 6, of the \emph{Lectures} there is a lovely
introduction to probability theory. It discusses the classic problem of
the ``random walk'' in some detail, then states without proof that the
+the ``random walk'' in some detail and then states without proof that the
generalization of the random walk leads to the Gaussian distribution
% diagn: add an inbook reference, of the type "which you will find in
% this book in Sect.~99.99."
+(\S~\ref{prob:100}),
\[
 p(x) = \frac{\exp(x^2/2\sigma^2)}{\sigma\sqrt{2\pi}}.
+ \Omega(x) = \frac{\exp(x^2/2\sigma^2)}{\sigma\sqrt{2\pi}}.
\]
For the derivation of this remarkable theorem, I scanned the book in
vain. One had no Internet access in those days, but besides a wellequipped
@@ 131,7 +131,7 @@ had kept the notes and even brought them
and added to them the new Gaussian proof.
That is how it has gone. To the old notes, I have added new proofs from
time to time, and although somehow I have misplaced the original
+time to time, and though I have somehow misplaced the original
highschool leaves I took to Germany with me the notes have nevertheless
grown with the passing years. These years have brought me the
good things years can bring: marriage, family and career; a good life
@@ 139,14 +139,17 @@ gratefully lived, details of which inter
unremarkable as seen from the outside. A life however can take strange
turns, reprising earlier themes. I had become an industrial building
construction engineer for a living
% diagn: add an inbook reference (or disclaimer if none) for the
% resistance reference below?
(and, appropriately enough, had most
lately added to the notes a mathematical justification of the standard
industrial building construction technique to measure the
resistancetoground of a new building's electrical grounding system),
+resistance to ground of a new building's electrical grounding
+system\footnote{%
+ % diagn: check this new footnote
+ The resistancetoground technique is too specialized to find place
+ in this book.%
+}),
when at a juncture between construction projects an unexpected
opportunity arose to pursue a Ph.D. in engineering at Virginia Tech,
+opportunity arose to pursue graduate work in engineering at Virginia Tech,
courtesy (indirectly, as it developed) of a research program not of the
United States Army as last time but this time of the United States Navy.
The Navy's research problem turned out to be in the highly mathematical
@@ 163,7 +166,7 @@ and, in due time, this book.
%was and is fourfold:
%\begin{enumerate}
% \item Number theory, mathematical recreations and odd mathematical
% \linebreak % bad break
+% \linebreak % bad break
% names interest Weisstein much more than they interest me; my own
% tastes run toward math directly useful in known physical
% applications. The selection of topics in each body of work reflects
@@ 233,8 +236,8 @@ and Weisstein%
There is an ironic personal story in this. As children in the
1970s, my brother and I had a 1959 World Book encyclopedia in our
bedroom, about twenty volumes. The encyclopedia was then a bit
 outdated (in fact the world had changed tremendously in the fifteen or
 twenty years following 1959, so it was more than a bit
+ outdated (in fact the world had changed tremendously during the
+ fifteen or twenty years following 1959, so the book was more than a bit
outdated) but the two of us still used it sometimes. Only years
later did I learn that my father, who in 1959 was fourteen years
old, had bought the encyclopedia with money he had earned
@@ 305,7 +308,8 @@ factors. The book might gain even more
formulas, and painted landscapes in place of geometric diagrams! I like
landscapes, too, but anyway you can see where that line of logic leads.
More substantively: despite the book's title, adverse criticism from
+More substantively: despite the book's title and despite the brief
+philosophical discussion in its chapter~\ref{intro}, adverse criticism from
some quarters for lack of rigor is probably inevitable; nor is such
criticism necessarily improper from my point of view. Still, serious
books by professional mathematicians tend to be \emph{for} professional
@@ 317,22 +321,18 @@ author lacking, I have written the book.
So here you have my old highschool notes, extended over
% diagn: how many years now?
twentyfive years
and through the course of
% diagn: how many degrees now?
twoandahalf university degrees,
now partly
+thirty yearsyears that include professional engineering practice and
+university study, research and teachingnow partly
typed and revised for the first time as a \LaTeX\ manuscript. Where
this manuscript will go in the future is hard to guess. Perhaps the
revision you are reading is the last. Who can say? The manuscript
met an uncommonly enthusiastic reception at Debconf~6~\cite{Debian}
May~2006 at Oaxtepec, Mexico; and in August of the same year it
warmly welcomed Karl Sarnow and Xplora Knoppix~\cite{Xplora} aboard as
the second official distributor of the book. Such developments augur
well for the book's future at least. But in the meantime, if anyone
should challenge you to prove the Pythagorean theorem on the spot, why,
whip this book out and turn to \S~\ref{alggeo:223}. That should
confound 'em.
+May~2006 at Oaxtepec, Mexicoa reception that, as far as it goes,
+augurs well for the book's future at least. In the meantime, if
+anyone should challenge you to prove the Pythagorean theorem on the
+spot, why, whip this book out and turn to chapter~\ref{intro}. That
+should confound 'em.
+
\nopagebreak
\noindent\\
diff pruN 0.53.201204142/tex/integ.tex 0.56.20180123.12/tex/integ.tex
 0.53.201204142/tex/integ.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/integ.tex 20180116 13:10:20.000000000 +0000
@@ 23,7 +23,7 @@ mathematics.
This chapter, which introduces the integral, is undeniably a hard chapter.
\index{Hamming, Richard~W. (19151998)}
+\index{Hamming, Richard~W.\ (19151998)}
Experience knows no reliable way to teach the integral adequately to the
uninitiated except through dozens or hundreds of pages of suitable
examples and exercises, yet the book you are reading cannot be that
@@ 31,9 +31,9 @@ kind of book. The sections of the prese
matters which elsewhere rightly command chapters or whole books of their
own. Concision can be a virtueand by design, nothing essential is
omitted herebut the bold novice who wishes to learn the integral from
these pages alone faces a daunting challenge. It can be done. However,
for less intrepid readers who quite reasonably prefer a gentler
initiation,~\cite{Hamming} is warmly recommended.
+these pages alone faces a daunting challenge. It can perhaps be done.
+Meanwhile, the less intrepid who prefer a gentler initiation might first
+try a good tutorial like~\cite{Hamming}.
% 
@@ 184,17 +184,17 @@ the shaded areas of Fig.~\ref{integ:220:
\end{pspicture}
\ec
\end{figure}
In the figure,~$S_1$ is composed of several tall, thin rectangles of
width 1 and height~$k$; $S_2$, of rectangles of width~$1/2$ and height
$k/2$.%
\footnote{
+In the figure,~$S_1$ is composed\footnote{%
If the reader does not fully understand this paragraph's illustration,
 if the relation of the sum to the area seems unclear, the reader is
+ if the relation of the sum to the area seems unclear, then the reader is
urged to pause and consider the illustration carefully until he does
understand it. If it still seems unclear, then the reader should
probably suspend reading here and go study a good basic calculus text
 like~\cite{Hamming}\@. The concept is important.
+ like~\cite{Hamming}\@. The concept is important.%
}
+of several tall, thin rectangles of
+width 1 and height~$k$; $S_2$, of rectangles of width~$1/2$ and height
+$k/2$.
As~$n$ grows, the shaded region in the figure looks more and more like a
triangle of base length $b=\mr{0x10}$ and height $h=\mr{0x10}$.
In fact it appears that
@@ 214,6 +214,9 @@ We have taken a shortcut directly to the
\index{limit of integration}
\index{integration!limit of}
+\index{factor of integration}
+\index{infinitesimal factor of integration}
+\index{integration!infinitesimal factor of}
In the equation
\[
S_n = \frac 1n\sum_{k=0}^{(\mr{0x10})n1} \frac{k}{n},
@@ 241,7 +244,7 @@ $\tau=\mr{0x10}$. Then
\]
in which it is conventional as~$\Delta \tau$ vanishes to change the
symbol $d\tau \la \Delta \tau$, where~$d\tau$ is the infinitesimal of
Ch.~\ref{drvtv}:
+chapter~\ref{drvtv}:
\[
S_\infty = \lim_{d\tau\ra 0^{+}} \sum_{k=0}^{(k_{\tau=\mr{0x10}})1} \tau\,d\tau.
\]
@@ 436,7 +439,6 @@ whether one regards the shaded trapezoid
the actual integration elements; the total integration area is the same
either way.%
\footnote{
 % diagn: review this footnote.
The trapezoid rule~(\ref{integ:def}) is perhaps the most
straightforward, general, robust way to define the integral, but other
schemes are possible, too. For example, taking the trapezoids in
@@ 445,9 +447,9 @@ either way.%
secondorder curve $f(\tau) \approx (c_2)(\tau\tau_{\mr{middle}})^2 +
(c_1)(\tau\tau_{\mr{middle}}) + c_0$ to the function, choosing the
coefficients~$c_2$, $c_1$ and~$c_0$ to make the curve match the
 function exactly at the pair's three sample points; then substitute
 the area under the pair's curve (which by the end of
 \S~\ref{integ:241} we shall know how to calculate exactly) for the
+ function exactly at the pair's three sample points; and then
+ substitute the area under the pair's curve (an area which, by the end
+ of \S~\ref{integ:241}, we shall know how to calculate exactly) for the
areas of the two trapezoids. Changing the symbol $\Delta\tau \la
d\tau$ on one side of the equation to suggest coarse sampling, the
result is the unexpectedly simple
@@ 508,7 +510,7 @@ remain constant from element to element,
are usually easiest to handle but variable widths find use in some
cases. The only requirement is that~$d\tau$ remain infinitesimal. (For
further discussion of the point, refer to the treatment of the Leibnitz
notation in \S~\ref{drvtv:240.25}.)
+notation in \S~\ref{drvtv:240}.)
% 
@@ 706,7 +708,9 @@ earlier in \S~\ref{alggeo:227}.
\index{operator!$+$ and~$$ as}
But then how are~$+$ and~$$ operators? They don't use any dummy
variables up, do they? Well, it depends on how you look at it.
+variables up, do they?
+
+Well, that depends on how you look at it.
Consider the sum $S = 3 + 5$. One can write this as
\[
S = \sum_{k=0}^1 f(k),
@@ 775,7 +779,7 @@ The functions $f(z) = 3z$, $f(u,v)=2uv$
linear functions. Nonlinear functions include%
\footnote{
If $3z+1$ is a \emph{linear expression,} then how is not $f(z)=3z+1$ a
 \emph{linear function?} Answer: it is partly a matter of purposeful
+ \emph{linear function?} Answer: the matter is a matter partly of purposeful
definition, partly of semantics. The equation $y=3x+1$ plots a line,
so the expression $3z+1$ is literally ``linear'' in this sense;
but the definition has more purpose to it than merely this. When you
@@ 801,7 +805,7 @@ of linear operators. For instance,%
\footnote{
You don't see~$d$ in the list of linear operators? But~$d$ in this
context is really just another way of writing~$\partial$, so, yes,~$d$
 is linear, too. See \S~\ref{drvtv:240.25}.
+ is linear, too. See \S~\ref{drvtv:240}.
}
\[
\frac{d}{dz}[f_1(z) + f_2(z)] = \frac{df_1}{dz} + \frac{df_2}{dz}.
@@ 847,7 +851,7 @@ lead the reader to the conclusion that,
% human lives, may depend on the results you predict. It is then that
% the \emph{need} for mathematical rigor will become painfully obvious
% to you. Before this time, mathematical rigor will often seem to be
% needless pedantry\mdots\ \cite[\S~1.6]{Hamming}
+% needless pedantry\mdots~\cite[\S~1.6]{Hamming}
% \end{quote}
% The author has not himself experienced such scourges for the cause of
% which Hamming warns, and observes that time spent conforming
@@ 893,8 +897,8 @@ diverge once reordered, as
\]
One cannot blithely swap operators here. This is not because swapping
is wrong, but rather because the inner sum after the swap diverges,
hence the outer sum after the swap has no concrete summand on which
to work. (\emph{Why} does the inner sum after the swap diverge?
+whence the outer sum after the swap has no concrete summand on which to
+work. (\emph{Why} does the inner sum after the swap diverge?
Answer: $1 + 1/3 + 1/5 + \cdots = [1] + [1/3 + 1/5] + [1/7 + 1/9 +
1/\mbox{0xB} + 1/\mbox{0xD} ] + \cdots > 1[1/4] + 2[1/8] +
4[1/\mbox{0x10}] + \cdots = 1/4 + 1/4 + 1/4 + \cdots$. See also
@@ 929,29 +933,29 @@ might have been to write the series as
in the first place, thus explicitly specifying equal numbers of positive
and negative terms.%
\footnote{\label{integ:240:fn19}%
 Some students of professional mathematics would assert that the
+ Some students of pure mathematics would assert that the
false conclusion had been reached through lack of rigor. Well, maybe.
This writer however does not feel sure that \emph{rigor} is quite the
 right word for what was lacking here. Professional mathematics does
+ right word for what was lacking here. Pure mathematics does
bring an elegant notation and a set of formalisms which serve ably to
spotlight certain limited kinds of blunders, but these are blunders no
less by the applied approach. The stalwart Leonhard Eulerarguably
the greatest seriessmith in mathematical historywielded his heavy
 analytical hammer in thunderous strokes before professional
+ analytical hammer in thunderous strokes before modern professional
mathematics had conceived the notation or the formalisms. If the
great Euler did without, then you and I might not always be forbidden
to follow his robust example. See also footnote~\ref{integ:240:fn20}.
 On the other hand, the professional approach is worth study
 if you have the time. Recommended introductions
+ On the other hand, the professional approach to pure mathematics is
+ worth study if you have the time. Recommended introductions
include~\cite{Knopp}, preceded if necessary
 by~\cite{Hamming} and/or \cite[Ch.~1]{Andrews}.
+ by~\cite{Hamming} and/or \cite[chapter~1]{Andrews}.
}
So specifying would have prevented the error. In the earlier example,
\[
\lim_{n\ra\infty} {\sum_{k=0}^n} \sum_{\rule{0em}{\tla}j=0}^1 \frac{()^j}{2k+j+1}
\]
likewise would have prevented the error, or at least have made the error
+would likewise have prevented the error, or at least have made the error
explicit.
The \emph{conditional convergence}%
@@ 990,7 +994,11 @@ slices, then the slices crosswise into t
are integrated over~$w$ to constitute the slice, then the slices over~$u$
to constitute the volume.
% diagn: this extended paragraph wants review.
+% The adverb "then" is here used as a conjuction. To do so breaks an
+% admittedly conventionally breakable rule of modern English, whereas
+% the book usually tends to avoid such breakages. However, the
+% sentences in which the word here appears do not read so well if the
+% breakable rule is not broken. So broken it is.
\index{integral swapping}
\index{integral!illbehaved}
\index{double integral!illbehaved}
@@ 1004,7 +1012,7 @@ whether we add the towers by rows first
first then by rows? The total volume is the same in any casealbeit
the integral over~$w$ is potentially illbehaved%
\footnote{\label{integ:240:fn20}%
 % diagn: inadvisable? I hope not, but maybe so anyway.
+ % Inadvisable? I hope not.
A great deal of ink is spilled in the applied mathematical literature
when summations and/or integrations are interchanged. The author
tends to recommend saving the ink, for pure and applied
@@ 1044,25 +1052,25 @@ integral as%
\index{mass density}
Double integrations arise very frequently in applications. Triple
integrations arise about as often. For instance, if $\mu(\ve r) =
\mu(x,y,z)$ represents the variable mass density of some soil,%
+\mu(x,y,z)$ represents the positiondependent mass density of some soil,%
\footnote{
 Conventionally the Greek letter~$\rho$ not~$\mu$ is used for density,
 but it happens that we need the letter~$\rho$ for a different purpose
 later in the paragraph.
+ Conventionally, the Greek letter~$\rho$ rather than~$\mu$ is used for
+ density. However, it happens that we shall need the letter~$\rho$ for
+ a different purpose later in the paragraph.%
}
then the total soil mass in some rectangular volume is
\[
M = \int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2}
\mu(x,y,z) \,dz\,dy\,dx.
\]
As a concise notational convenience, the last is likely to be written
+As a concise notational convenience, the last is likely to be written,
\[
M = \int_V \mu(\ve r) \,d\ve r,
\]
where the~$V$ stands for ``volume'' and is understood to imply a triple
integration. Similarly for the double integral,
\[
 V = \int_S f(\we\rho) \,d\we\rho,
+ Q = \int_S f(\we\rho) \,d\we\rho,
\]
where the~$S$ stands for ``surface'' and is understood to imply a double
integration.
@@ 1093,15 +1101,258 @@ By composing and solving appropriate int
peri\me\ters, areas and volumes of interesting common shapes and
solids.
% diagn: The area under, say, a thirdorder curve ought to be calculated
% here before trying the circleand a diagram should be given.
+\subsection{The area within a parabola}
+\label{integ:241.07}
+\index{area}
+\index{parabola!area within}
+\index{area!within a parabola}
+
+Figure~\ref{integ:241:fig0} depicts an element of the area within the
+parabola
+\bq{integ:241:071}
+ y = ax^2.
+\eq
+\begin{figure}
+ \caption{The area within a parabola.}
+ \label{integ:241:fig0}
+ \bc
+ {
+ \nc\xax{5}
+ \nc\xbx{1.0}
+ \nc\xcx{ 5}
+ \nc\xdx{ 3.3}
+ \nc\ttax{2.1}
+ \nc\ttay{0.3}
+ \nc\ttby{2.6}
+ \nc\ttcx{1.300} % ell
+ \nc\ttcy{2.028} % a*ell*ell, a = 1.20
+ \nc\ttdx{0.40}
+ \nc\ttex{0.60}
+ \nc\ttdy{0.192}
+ \nc\ttey{0.432}
+ \nc\ttm{0.10}
+ \nc\ttmx{0.30}
+ \nc\ttmy{0.60}
+ \nc\ttma{0.40}
+ \nc\ttmb{0.80}
+ \nc\ttmc{2.00}
+ \nc\ttnx{0.30}
+ \nc\ttpx{1.85}
+ \nc\ttqx{0.50}
+ \nc\ttqy{2.35}
+ \nc\ttrx{0.10}
+ \nc\ttsx{0.30}
+ \begin{pspicture}(\xax,\xbx)(\xcx,\xdx)
+ \psset{dimen=middle}
+ \small
+ %\psframe[linewidth=0.5pt](\xax,\xbx)(\xcx,\xdx)%
+ \pspolygon[linewidth=0.5pt,fillstyle=solid,fillcolor=lightgray]
+ (\ttdx,\ttdy)(\ttdx,\ttcy)(\ttex,\ttcy)(\ttex,\ttey)%
+ \psset{linewidth=2.0pt}%
+ \psplot[plotpoints=300]{1.3}{1.3}{
+ x dup mul 1.20 mul
+ }%
+ \psline{CC}(\ttcx,\ttcy)(\ttcx,\ttcy)
+ \psset{linewidth=0.5pt}%
+ \psplot[plotpoints=30]{1.40}{1.3}{
+ x dup mul 1.20 mul
+ }%
+ \psplot[plotpoints=30]{1.3}{1.40}{
+ x dup mul 1.20 mul
+ }%
+ \psline(\ttax,0)(\ttax,0)\uput[r](\ttax,0){$x$}%
+ \psline(0,\ttay)(0,\ttby)\uput[u](0,\ttby){$y$}%
+ \rput(0,\ttmy){%
+ \psline(\ttcx, \ttmb)(\ttcx,\ttmc)%
+ \psline( \ttcx, \ttmb)( \ttcx,\ttmc)%
+ \psline(\ttcx,\ttm)(\ttcx,\ttma)%
+ \psline( \ttcx,\ttm)( \ttcx,\ttma)%
+ \psline{<}(\ttcx,0)(\ttnx,0)%
+ \psline{<}( \ttcx,0)( \ttnx,0)%
+ \rput(0,0){$2\ell$}%
+ }%
+ \rput(\ttpx,0){%
+ \psline(\ttm,\ttcy)(\ttm,\ttcy)%
+ \psline{<}(0,0)(0,0.70)%
+ \psline{<}(0,\ttcy)(0,1.20)%
+ \rput(0,0.950){$a\ell^2$}%
+ }%
+ \rput(\ttqx,\ttqy){%
+ \psline{>}(\ttsx,0)(\ttrx,0)%
+ \psline{>}( \ttsx,0)( \ttrx,0)%
+ \psline(\ttrx,\ttm)(\ttrx,\ttm)%
+ \psline( \ttrx,\ttm)( \ttrx,\ttm)%
+ \rput(0.54,0){$dx$}
+ }%
+ \end{pspicture}
+ }
+ \ec
+\end{figure}%
+The element---which is shaded in the figure---is nearly rectangular (or, if you
+prefer, nearly trapezoidal), and indeed is the more nearly rectangular
+(trapezoidal) the smaller~$dx$ is made to be.
+Evidently, therefore, the element's area is
+$A_{\mr{rectangle}} = (a\ell^2-ax^2)dx$. Integrating the many rectangles, we
+find the area within the parabola to be
+\[
+ \begin{split}
+ A_\mr{parabola} &= \int_{x=-\ell}^{\ell} \! A_\mr{rectangle}
+ = \int_{-\ell}^{\ell} (a\ell^2-ax^2)dx
+ \\&= a\ell^2 \int_{-\ell}^{\ell} \,dx
+ - a\int_{-\ell}^{\ell} x^2\,dx
+ \\&= a\ell^2\bigg[x\bigg]_{x=-\ell}^{\ell}
+ - a\bigg[\frac{x^3}{3}\bigg]_{x=-\ell}^{\ell}.
+ \end{split}
+\]
+But $[x]_{x=-\ell}^{\ell}=(\ell)-(-\ell)=2\ell$
+and $[x^3/3]_{x=-\ell}^{\ell}=(\ell)^3/3-(-\ell)^3/3=2\ell^3/3$, so
+\bq{integ:241:074}
+ A_\mr{parabola} = \frac{4a\ell^3}{3}.
+\eq
+
+\subsection{The length of a parabola}
+\label{integ:241.08}
+\index{length}
+\index{parabola!length of}
+\index{length!of a parabola}
+
+Section~\ref{integ:241.07} has computed the area within the parabola of
+Fig.~\ref{integ:241:fig0}, but what if one wishes instead to compute the
+parabola's \emph{length?} According to Pythagoras,~(\ref{alggeo:pythag}),
+\[
+ (ds)^2 = (dx)^2 + (dy)^2,
+\]
+the~$ds$ being an element of the curve's length. Taking the derivative
+of~(\ref{integ:241:071}),
+\[
+ dy = 2ax\,dx,
+\]
+so
+\[
+ (ds)^2 = (dx)^2 + (2ax\,dx)^2,
+\]
+or, solving for~$ds$,
+\[
+ ds = dx\sqrt{1+(2ax)^2}.
+\]
+Integrating,
+\[
+ s = \int_{x=-\ell}^{\ell}ds
+ = \int_{-\ell}^{\ell}dx\sqrt{1+(2ax)^2}
+ = 2\int_{0}^{\ell}dx\sqrt{1+(2ax)^2},
+\]
+the last step of which observes that, symmetrically, the parabola's left
+half does not differ in length from its right. Defining
+\[
+ u \equiv 2ax,
+\]
+whose derivative is
+\[
+ du = 2a\,dx,
+\]
+permits~$s$ to be expressed in a slightly simpler form,
+\bq{integ:241:082}
+ s = \frac 1 a \int_{0}^{2a\ell}du\sqrt{1+u^2},
+\eq
+but after this it is not obvious what should be done next.
+
+\index{hyperbolic arcsine}
+Several techniques can be tried, most of which however seem to fail
+against an integrand like $\sqrt{1+u^2}$. Paradoxically, a modified
+integrand like $u\sqrt{1+u^2}$, which looks more complicated, would have been
+easier to handle, for the technique of%
+\footnote{%
+ This is a forward reference. It is given for information. You
+ need not follow it for now, for the present section's logic does
+ not depend on it.%
+}
+\S~\ref{inttx:220}
+would have resolved it neatly;
+% Do not delete but retain the following, which resolves
+% the above aside.
+%\[
+% \begin{split}
+% I &= \int_{0}^{t}du\,u\sqrt{1+u^2} \\
+% w^2 &\equiv 1+u^2 \\
+% w\,dw &= u\,du \\
+% I &= \int_{1}^{\sqrt{1+t^2}}w^2\,dw
+% \end{split}
+%\]
+whereas neither the technique of \S~\ref{inttx:220} nor any of several
+others seems to make headway against the simplerlooking~$\sqrt{1+u^2}$.
+Nevertheless, after some trial and error, one may recall Table~\ref{cexp:drvi}
+which affords a clue. From the table,\footnote{%
+ The table's relevant entry includes a~$\pm$ sign but only
+ the~$+$ sign interests us here. Why only? Because we shall have to
+ choose one branch or the other of the hyperbolic arcsine along which
+ to work whether we will or nill. Nothing prevents us from choosing
+ the positive branch.
+
+ You can try the negative branch if you wish. After some signs
+ have canceled, you will find that the negative branch arrives at the
+ same result.%
+}%
+\bq{integ:241:083a}
+ \frac{d}{du} \mopx{arcsinh} u = \frac{1}{\sqrt{1+u^2}},
+\eq
+the right side of which resembles our integrand $\sqrt{1+u^2}$. To the above
+derivative we can append two more,
+\begin{eqnarray}
+ \frac{d}{du} \sqrt{1+u^2} &=& \frac{u}{\sqrt{1+u^2}},\label{integ:241:083b}\\
+ \frac{d}{du} u\sqrt{1+u^2} &=& \sqrt{1+u^2} + \frac{u^2}{\sqrt{1+u^2}},\label{integ:241:083cc}
+\end{eqnarray}
+computed by the chain and product rules of \S~\ref{drvtv:250}. The last
+includes the expression $u^2/\sqrt{1+u^2}$ which, after adding and
+subtracting $1/\sqrt{1+u^2}$, one can alternately write as
+$u^2/\sqrt{1+u^2} = (1+u^2)/\sqrt{1+u^2} - 1/\sqrt{1+u^2} = \sqrt{1+u^2}
+- 1/\sqrt{1+u^2}$. Thus,
+\bq{integ:241:083c}
+ \frac{d}{du} u\sqrt{1+u^2} = 2\sqrt{1+u^2} - \frac{1}{\sqrt{1+u^2}}.
+\eq
+Not all the derivatives of this paragraph turn out to be useful to the
+present problem but~(\ref{integ:241:083a}) and~(\ref{integ:241:083c}) do. The
+average of those two is
+\bq{integ:241:084}
+ \frac{d}{du} \left(\frac{ u\sqrt{1+u^2} + \mopx{arcsinh} u }{2} \right)
+ = \sqrt{1+u^2},
+\eq
+whose right side matches our integrand.
+
+Why should one care that the right side of the average~(\ref{integ:241:084})
+matches our integrand? Because the match lets one use the fundamental
+theorem of calculus,~(\ref{integ:antider}) of \S~\ref{integ:230}. Applying the
+average, according to the fundamental theorem, to~(\ref{integ:241:082}),
+\[
+ s = \frac 1 a \left[\frac{ u\sqrt{1+u^2} + \mopx{arcsinh} u }{2} \right]_{u=0}^{2a\ell}.
+\]
+Evaluating,
+\bq{integ:241:085}
+ s = \ell\sqrt{1+(2a\ell)^2} + \frac{1}{2a}\mopx{arcsinh} 2a\ell,
+\eq
+or, if you prefer, expanding~$\mopx{arcsinh}(\cdot)$ according to
+Table~\ref{cexp:tblprop},
+\bq{integ:241:086}
+ s = \ell\sqrt{1+(2a\ell)^2} + \frac{1}{2a}\ln\left[
+ 2a\ell + \sqrt{1 + (2a\ell)^2 }\right]
+\eq
+(in which we have chosen the~$+$ sign for the table's~$\pm$ because the~$-$
+sign would have returned a complex length). This is the parabola's length,
+measured along its curve.
+
+That wasn't so easy. If you are not sure that you have followed it, that's
+all right, for you can return to study it again later. Also, you can
+learn more about the parabola in \S~\ref{vector:280.05} and, in
+\S~\ref{inttx:440}, more too about the technique by which the present
+subsection has evaluated~(\ref{integ:241:082}). Meanwhile, fortunately,
+the next subsection will be easier and also more interesting. It
+computes the area of a circle.
\subsection{The area of a circle}
\label{integ:241.10}
\index{area}
\index{area!surface}
\index{shape!area of}
\index{circle!area of}
+\index{area!of a circle}
Figure~\ref{integ:241:fig1} depicts an element of a circle's area.
\begin{figure}
@@ 1155,26 +1406,43 @@ infinitesimally narrow, the wedge is ind
of base length~$\rho\,d\phi$ and height~$\rho$. The area of such a
triangle is $A_\mr{triangle}=\rho^2\,d\phi/2$. Integrating the many
triangles, we find the circle's area to be
\bq{integ:241:Acircle}
+\[
A_\mr{circle} = \int_{\phi=-\pi}^{\pi} \! A_\mr{triangle}
= \int_{\pi}^{\pi} \frac{\rho^2\,d\phi}{2}
 = \frac{2\pi \rho^2}{2}.
+ = \left.\frac{\rho^2\phi}{2}\right|_{\phi=-\pi}^{\pi}.
+\]
+Evaluated,
+\bq{integ:241:Acircle}
+ A_\mr{circle} = \frac{2\pi \rho^2}{2}.
\eq
(The numerical value of~$2\pi$---the circumference or perimeter of the
unit circle---we have not calculated yet. We will calculate it in
\S~\ref{taylor:355}.)
+\S~\ref{taylor:355}. The reason to write~$2\pi/2$ rather
+than the deceptively simplerlooking~$\pi$ is that the symbol~$\pi$
+alone obscures the sense in which the circle resembles a rolledup triangle.
+See appendix~\ref{hex}\@. Sometimes the book uses the symbol~$\pi$ alone,
+anyway, just to reduce visual clutter; but that an
+alternate symbol like\footnote{%
+ \index{Palais, Bob}%
+ The symbol~$\palais$ has no name of which the writer is aware. One might
+ provisionally call it ``palais'' after the mathematician who has suggested
+ it.~\cite{Palais:2001}%
+}
+$\palais=2\pi$ is not current is unfortunate. If such a symbol were current,
+then we could have written that $A_\mr{circle} = \palais \rho^2/2$.)
\subsection{The volume of a cone}
\label{integ:241.20}
\index{cone!volume of}
\index{pyramid!volume of}
+\index{volume!of a cone or pyramid}
\index{normal vector or line}
\index{vertex}
One can calculate the volume of any cone (or pyramid) if one knows its
base area~$B$ and its altitude~$h$ measured normal%
+base area~$B$ and its altitude~$h$ measured normally%
\footnote{
 \emph{Normal} here means ``at right angles.''
+ \emph{Normally} here means ``at right angles.''
}
to the base. Refer to Fig.~\ref{integ:241:fig2}.
\begin{figure}
@@ 1200,16 +1468,16 @@ to the base. Refer to Fig.~\ref{integ:2
\psset{linewidth=1.0pt}
\small
\psccurve[linewidth=0pt,linecolor=lightgray,fillstyle=solid,fillcolor=lightgray]
 (0.8, 0.1)(1.0,0.0)(0.8,0.1)(0.3,0)(0.8,0.1)(1.0,0.0)(0.8, 0.1)
 \psecurve[linestyle=solid] (0.8, 0.1)(1.0,0.0)(0.8,0.1)(0.3,0)(0.8,0.1)(1.0,0.0)(0.8, 0.1)
 \psecurve[linestyle=dashed](0.8,0.1)(1.0,0.0)(0.8, 0.1)(0.8, 0.1)(1.0,0.0)(0.8,0.1)
+ (0.8, 0.1)(1.0,0.0)(0.8,0.1)(0.3,0.05)(0.5,0.18)(0.8,0.1)(1.0,0.0)(0.8, 0.1)(0.45,0.13)
+ \psecurve[linestyle=solid] (0.8, 0.1)(1.0,0.0)(0.8,0.1)(0.3,0.05)(0.5,0.18)(0.8,0.1)(1.0,0.0)(0.8, 0.1)
+ \psecurve[linestyle=dashed](0.8,0.1)(1.0,0.0)(0.8, 0.1)(0.45,0.13)(0.8, 0.1)(1.0,0.0)(0.8,0.1)
\psline(1.0,0)(0.3,1.8)(1.0,0)
{
\psset{linewidth=0.5pt}
\psline[linestyle=dashed](0.3,1.8)(0.3,0)
\psline[linewidth=0.5pt](0.0,0)(0.0,0.3)(0.3,0.3)
\rput(0.1,0.8){$h$}
 \psline(0.6,0.03)(0.75,0.35)
+ \psline(0.61,0.07)(0.75,0.35)
\rput(0.79,0.52){$B$}
}
\end{pspicture}
@@ 1238,12 +1506,16 @@ the cone's vertex, then the crosssectio
nothing other than $(B)(z/h)^2$, regardless of the base's shape.
}
$(B)(z/h)^2$. For this reason, the cone's volume is
\bq{integ:241:Vcone}
+\[
V_\mr{cone}
= \int_0^h (B)\left(\frac{z}{h}\right)^2\,dz
= \frac{B}{h^2} \int_0^h z^2\,dz
+ = \frac{B}{h^2} \left[\frac{z^3}{3}\right]_{z=0}^{h}
= \frac{B}{h^2} \left(\frac{h^3}{3}\right)
 = \frac{Bh}{3}.
+\]
+Evaluating,
+\bq{integ:241:Vcone}
+ V_\mr{cone} = \frac{Bh}{3}.
\eq
\subsection{The surface area and volume of a sphere}
@@ 1254,6 +1526,7 @@ $(B)(z/h)^2$. For this reason, the cone
\index{surface area}
\index{solid!surface area of}
\index{sphere!surface area of}
+\index{area!of a sphere's surface}
\index{strip, tapered}
\index{tapered strip}
\index{equator}
@@ 1370,14 +1643,13 @@ one wants to calculate both the surface
\ec%
\end{figure}
For the surface area, the sphere's surface is sliced vertically down the~$z$
axis into narrow constant$\phi$ tapered strips (each strip broadest
+axis into narrow, constant$\phi$, tapered strips (each strip broadest
at the sphere's equator, tapering to points at the sphere's~$\pm z$
poles) and horizontally across the~$z$ axis into narrow
+poles) and horizontally across the~$z$ axis into narrow,
constant$\theta$ rings, as in Fig.~\ref{integ:figsphere2}. A surface
element so produced (seen as shaded in the latter figure) evidently has
the area%
\footnote{
 % diagn: this new footnote wants review.
It can be shown, incidentallythe details are left as an
exercisethat $dS = r \,dz\,d\phi$ also. The subsequent
integration arguably goes a little easier if~$dS$ is accepted in this
@@ 1462,7 +1734,7 @@ does one check the result?%
decimal, it's $1131/13 = 87$.
Actually, hexadecimal is just proxy for binary (see
 Appendix~\ref{hex}), and long division in straight binary is kind of
+ appendix~\ref{hex}), and long division in straight binary is kind of
fun. If you have never tried it, you might. It is simpler than
decimal or hexadecimal division, and it's how computers divide. The
insight gained is worth the trial.
@@ 1503,7 +1775,7 @@ Differentiating~(\ref{integ:245:20}) wit
\end{split}
\eq
Either line of~(\ref{integ:245:24}) can be used to check an
integration. Evaluating~(\ref{integ:245:20}) at $b=a$ yields
+integration. Evaluating~(\ref{integ:245:20}) at $b=a$ yields that
\bq{integ:245:26}
S_{b=a} = 0,
\eq
@@ 1527,14 +1799,15 @@ serve such \emph{indefinite integrals.}
\index{differentiation!analytical versus numeric}
\index{integration!analytical versus numeric}
It is a rare irony of mathematics that, although numerically
+It is a rare irony of mathematics that, though numerically
differentiation is indeed harder than integration, analytically
precisely the opposite is true. Analytically, differentiation is the
easier. So far the book has introduced only easy integrals, but
Ch.~\ref{inttx} will bring much harder ones. Even experienced
mathematicians are apt to err in analyzing these. Reversing an
integration by taking an easy derivative is thus an excellent way to
check a hardearned integration result.
+the opposite is true. Analytically, differentiation is the
+easier. So far, mostly, the integrals the book has introduced have been
+easy ones (\S~\ref{integ:241.08} excepted), but chapter~\ref{inttx} will
+bring harder ones. Even experienced mathematicians are apt to err in
+analyzing these. Reversing an integration by taking a relatively easy
+derivative is thus an excellent way to check a hardearned integration
+result.
% 
@@ 1670,11 +1943,17 @@ One can write this more concisely in the
\[
x(t) = u(tt_o) x_o,
\]
where $u(t)$ is the \emph{Heaviside unit step,}
+where $u(t)$ is the \emph{Heaviside unit step,}\footnote{%
+ Whether $u(0) = 0$, $u(0) = 1/2$ or $u(0) = 1$ seldom
+ matters. The definition as printed has that $u(0) = 1$ for the
+ convenience of the Laplace transform of \S~\ref{fouri:200}, but this
+ is an artificial reason. In other contexts, you can alter or ignore
+ the definition on the edge as needed.%
+}
\bq{integ:670:10}
u(t) \equiv \begin{cases}
0, & t < 0; \\
 1, & t > 0;
+ 1, & t \ge 0;
\end{cases}
\eq
plotted in Fig.~\ref{integ:670:figu}.
@@ 1786,7 +2065,7 @@ for any function $f(t)$. (Equation~\ref
\footnote{
It seems inadvisable for the narrative to digress at this point to
explore $u(z)$ and $\delta(z)$, the unit step and delta of a complex
 argument, although by means of Fourier analysis (Ch.~\ref{fouri})
+ argument, though by means of Fourier analysis (chapter~\ref{fouri})
or by conceiving the Dirac delta as an infinitely narrow Gaussian
pulse (\S~\ref{fouri:130})
it could perhaps do so. The book has more pressing topics to treat.
@@ 1812,11 +2091,13 @@ for any function $f(t)$. (Equation~\ref
It's a little like the sixfingered man in Goldman's \emph{The
Princess Bride}~\cite{Goldman}\@. If I had established a definition
 of ``nobleman'' which subsumed ``human,'' whose relevant traits in my
+ of ``nobleman'' which assumed ``human,'' the latter of
+ whose relevant traits in my
definition included five fingers on each hand, when the sixfingered
Count Rugen appeared on the scene, then you would expect me to adapt
my definition, wouldn't you? By my pre\"existing definition, strictly
 speaking, the sixfingered count is ``not a nobleman''; but such
+ speaking, the sixfingered count is by implication ``not a nobleman'';
+ but such
exclusion really tells one more about flaws in the definition than it
does about the count.
@@ 1904,10 +2185,15 @@ $\mbox{}^{*}\mbox{asterisks}$. Work the
(a)~ $\int_1^x \sqrt{1+2\tau} \,d\tau$;\ \ %
(b)~ $\int_x^a [(\cos\sqrt\tau)/\sqrt\tau] \,d\tau.$
\item $\mbox{}^{*}\mbox{Evaluate}$%
 \footnote{\cite[back endpaper]{Shenk}}
+ \footnote{Parts~(a) and~(b) are sourced from \cite[back endpaper]{Shenk}.}
(a)~$\int_0^x [1/(1+\tau^2)] \,d\tau$ (answer: $\arctan x$);\ \ %
 (b)~$\int_0^x [(4+i3)/\sqrt{2-3\tau^2}] \,d\tau$ (hint: the answer
 involves another inverse trigonometric).
+ involves another inverse trigonometric);
+ (c)~$\int_0^x \sqrt{1-\tau^2}\,d\tau$;
+ (d)~$\int_0^x \tau\sqrt{1-\tau^2}\,d\tau$ (hint: use a different
+ technique than for part~c);
+ (e)~$\int_0^x \tau^2\sqrt{1-\tau^2}\,d\tau$ (hint: use a similar
+ technique as for part~c).
\item $\mbox{}^{**}\mbox{Evaluate}$
 (a)~$\int_{-\infty}^x\exp[-\tau^2/2] \,d\tau$; \ \ %
 (b)~$\int_{-\infty}^{\infty}\exp[-\tau^2/2] \,d\tau$.
@@ 1933,6 +2219,6 @@ diverse integrals is well worth cultivat
Chapter~\ref{inttx} introduces some of the basic, most broadly useful
integralsolving techniques. Before addressing techniques of
integration, however, as promised earlier we turn our attention in
Chapter~\ref{taylor} back to the derivative, applied in the form of
+chapter~\ref{taylor} back to the derivative, applied in the form of
the Taylor series.
diff pruN 0.53.201204142/tex/intro.tex 0.56.20180123.12/tex/intro.tex
 0.53.201204142/tex/intro.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/intro.tex 20180114 16:55:41.000000000 +0000
@@ 5,20 +5,116 @@
\index{derivation}
\index{proof}
This is a book of applied mathematical proofs. If you have seen a
mathematical result, if you want to know why the result is so, you can
look for the proof here.
+\index{Pythagorean theorem}
+\index{Pythagoras (c.~580c.~500~B.C.)}
+\index{diagonal}
+\index{hypotenuse}
+\index{leg}
+The Pythagorean theorem holds that
+\bq{alggeo:pythag}
+ a^2 + b^2 = c^2,
+\eq
+where~$a$, $b$ and~$c$ are the lengths of the legs and diagonal of a
+right triangle as in Fig.~\ref{alggeo:223:25}.
+\begin{figure}
+ \caption{A right triangle.}
+ \label{alggeo:223:25}
+ \bc
+ {
+ \nc\xax{5}
+ \nc\xbx{1}
+ \nc\xcx{ 4}
+ \nc\xdx{ 3}
+ \begin{pspicture}(\xax,\xbx)(\xcx,\xdx)
+ \psset{dimen=middle}
+ \small
+ %\psframe[linewidth=0.5pt](\xax,\xbx)(\xcx,\xdx)
+ \psset{linewidth=2.0pt}
+ \psline{CC}(0,0)(0,2.6)(1.4,0)(0,0)
+ \rput(.7,.3){$a$}
+ \rput(.3,1.3){$b$}
+ \rput(1.0,1.45){$c$}
+ \nc\tmpd{0.3}
+ \psline[linewidth=0.5pt](\tmpd,0)(\tmpd,\tmpd)(0,\tmpd)
+ \end{pspicture}
+ }
+ \ec
+\end{figure}
+Many proofs of the theorem are known.
+
+\index{square!rotated}
+\index{square!tilted}
+\index{area}
+One such proof posits a square of side length $a+b$ with a tilted square
+of side length~$c$ inscribed as in Fig.~\ref{alggeo:223:35}.
+\begin{figure}
+ \caption{The Pythagorean theorem.}
+ \label{alggeo:223:35}
+ \bc
+ {
+ \nc\xax{5}
+ \nc\xbx{3}
+ \nc\xcx{ 5}
+ \nc\xdx{ 3}
+ \begin{pspicture}(\xax,\xbx)(\xcx,\xdx)
+ \psset{dimen=middle}
+ \small
+ %\psframe[linewidth=0.5pt](\xax,\xbx)(\xcx,\xdx)
+ \psset{linewidth=2.0pt}
+ \nc\tmpa{0.6}
+ \nc\tmpb{1.97}
+ \nc\tmpc{2.0}
+ \psline{CC}( \tmpa,\tmpb)( \tmpb, \tmpa)(\tmpa, \tmpb)(\tmpb,\tmpa)( \tmpa,\tmpb)
+ \psline{CC}( \tmpc,\tmpc)( \tmpc, \tmpc)(\tmpc, \tmpc)(\tmpc,\tmpc)( \tmpc,\tmpc)
+ \rput(1.3,2.3){$a$}
+ \rput(2.3,.7){$b$}
+ \rput(.7,2.3){$b$}
+ \psline[linewidth=0.5pt](\tmpa,2.0)(\tmpa,2.3)
+ \rput(1.0,.55){$c$}
+ \nc\tmpd{0.3}
+ \rput(2,2){
+ \psline[linewidth=0.5pt](\tmpd,0)(\tmpd,\tmpd)(0,\tmpd)
+ }
+ \rput(\tmpa,\tmpb)
+ {
+ \rput{28.30}(0,0)
+ {
+ \psline[linewidth=0.5pt](\tmpd,0)(\tmpd,\tmpd)(0,\tmpd)
+ }
+ }
+ \end{pspicture}
+ }
+ \ec
+\end{figure}
+The area of each of the four triangles in the figure is evidently
+$ab/2$. The area of the tilted inner square is~$c^2$. The area of the
+large outer square is $(a+b)^2$. But the large outer square is
+comprised of the tilted inner square plus the four triangles, so the
+area of the large outer square equals the area of the tilted inner
+square plus the areas of the four triangles. In mathematical symbols,
+this is that
+\[
+ (a+b)^2 = c^2 + 4\left(\frac{ab}{2}\right),
+\]
+which simplifies directly to~(\ref{alggeo:pythag}).
+
+If you like the foregoing, then you might like the book.
+
+This book is a book of applied mathematical proofs. When you have seen
+a mathematical result somewhere, if you want to know why the result is
+so, then you can look for the proof here.
The book's purpose is to convey the essential ideas underlying the
derivations of a large number of mathematical results useful in the
modeling of physical systems. To this end, the book emphasizes
main threads of mathematical argument and the motivation underlying the
main threads, de\"emphasizing formal mathematical rigor. It derives
mathematical results from the purely applied perspective of the
scientist and the engineer.

The book's chapters are topical. This first chapter
treats a few introductory matters of general interest.
+modeling of physical systems. To this end, the book emphasizes main
+threads of mathematical argument and the motivation underlying the main
+threads, de\"emphasizing formal mathematical rigor. It derives
+mathematical results from the applied perspective of the scientist and
+the engineer.
+
+The book's chapters are topical. This first chapter explains the book's
+philosophy and otherwise treats a few introductory matters of general
+interest.
% 
@@ 32,10 +128,10 @@ What is applied mathematics?
%
\begin{quote}
Applied mathematics is a branch of mathematics that concerns itself
 with the application of mathematical knowledge to other
 domains\mdots The question of what is applied mathematics does not
 answer to logical classification so much as to the sociology of
 professionals who use mathematics.~\cite{defappliedmath}
+ with the application of mathematical knowledge to other domains\mdots
+ The question of what is applied mathematics does not answer to logical
+ classification so much as to the sociology of professionals who use
+ mathematics.~\cite{defappliedmath}
\end{quote}
%
That is about right, on both counts. In this book we shall define
@@ 44,26 +140,35 @@ scientists, engineers and the like; proc
welldefined sets of axioms but rather directly from a nebulous mass of
natural arithmetical, geometrical and classicalalgebraic idealizations
of physical systems; demonstrable but generally lacking the detailed
rigor of the professional mathematician.
+rigor of the pure, professional mathematician.
% 
\section{Rigor}
\label{intro:284}
\index{rigor}
+\index{formal mathematical rigor}
\index{professional mathematics}
\index{pure mathematics}
\index{mathematics!professional or pure}
\index{mathematician!professional}
+\index{philosophy}
It is impossible to write such a book as this without some discussion of
mathematical rigor. Applied and pure mathematics differ
principally and essentially in the layer of abstract definitions the
latter subimposes beneath the physical ideas the former seeks to model.
Notions of mathematical rigor fit far more comfortably in the abstract
realm of the professional mathematician; they do not always translate so
gracefully to the applied realm. The applied mathematical reader should
be aware of this difference.
+\index{definition!abstract}
+\index{subimposition}
+\index{algebra!formal}
+\index{formal algebra}
+Applied and pure mathematics differ principally and essentially in the
+layer of abstract definitions the latter subimposes beneath the physical
+ideas the former seeks to model. That subimposed layer, the disciplined
+use of it, and the formal algebra associated with it may together be
+said to institute pure mathematical \emph{rigor.}
+
+Such pure mathematical rigor tends to dwell more comfortably in lone
+reaches of the professional mathematician's realm than among the hills
+and plats of applications, where it does not always travel so
+gracefully. If this book will be a book of mathematical derivations,
+then it might speak a little of rigor here at the start.
\subsection{Axiom and definition}
\label{intro:284.2}
@@ 71,11 +176,42 @@ be aware of this difference.
\index{definition}
\index{irreducibility}
Ideally, a professional mathematician knows or precisely specifies in
advance the set of fundamental axioms he means to use to derive a
result. A prime aesthetic here is irreducibility:
no axiom in the set should overlap the others or be specifiable in terms
of the others. Geometrical argumentproof by sketchis distrusted.
+\index{pedantry}
+\index{infinity}
+\index{axiomatic method}
+Whether explicitly or implicitly, the professional mathematician usually
+founds his rigor upon what he calls the \emph{axiomatic method}an
+\emph{axiom}, according to Webster, being ``a selfevident and necessary
+truth, or a proposition whose truth is so evident at first sight that no
+reasoning or demonstration can make it plainer; a proposition which it
+is necessary to take for granted.''%
+\footnote{\cite{Webster1913}}
+
+\index{set}
+For example, the following could be an axiom: ``For every set~$A$ and
+every set~$B$, $A=B$ if and only if for every set~$x$, $x$ is a member
+of~$A$ if and only if~$x$ is a member of~$B$.''\footnote{%
+ The source quoted is \cite[\S~2.1]{Wilson}, which however uses the
+ symbol~$\in$ for ``is a member of.''%
+}
+
+Axioms undergird the work of the professional mathematician.
+Indeed, so fundamental are axioms to the professional mathematician's
+work that\emph{ideally and at least in principle}it may be that
+the professional
+will derive nothing until he has first declared the axioms upon which his
+derivations will rely; that is, until he has stated the least premises
+upon which he will argue. Conversely, aiming deeperpromoting the
+latter of Webster's two readingsthe professional can
+illuminate the wisdom latent in his very axioms by their use in a
+suitable derivation.%
+\footnote{\cite[Einleitung]{Hilbert:geometrie}}
+Irreducibility is a prime aesthetic on either
+level: at best, no axiom should overlap the others or be specifiable in
+terms of the others. Nonaxiomatic geometrical argumentproof by
+sketch if you like, as the Pythagorean with its figures at the head
+of this chapteris distrusted.\footnote{\cite[chapters~1
+and~2]{Weierstrass:werke}}
The professional mathematical literature discourages undue pedantry
indeed, but its readers do implicitly demand a convincing assurance that
its writers \emph{could} derive results in pedantic detail if called
@@ 85,13 +221,16 @@ statements such as that
\[
\frac{1}{0} = \infty,
\]
without first inquiring as to exactly what is meant by symbols like~$0$
and~$\infty$.
+among others, without first inquiring as to exactly what is meant by
+symbols like~$0$ and~$\infty$.
\index{model}
\index{geometrical argument}
\index{proof!by sketch}
\index{sketch, proof by}
+\index{bridge}
+\index{soil}
+\index{wind}
The applied mathematician begins from a different base. His ideal lies
not in precise definition or irreducible axiom, but rather in the
elegant modeling of the essential features of some physical system.
@@ 99,9 +238,10 @@ Here, mathematical definitions tend to b
the way, based on previous experience solving similar problems, adapted
implicitly to suit the model at hand. If you ask the applied
mathematician exactly what his axioms are, which symbolic algebra he is
using, he usually doesn't know; what he knows is that the bridge is
founded in certain soils with specified tolerances, suffers
suchandsuch a wind load, etc. To avoid error, the applied
+using, he usually does not know; what he knows is that the physical
+system he is analyzing, describing or planningsay, a bridgeis to
+be founded in certain soils with observed tolerances, is to suffer
+suchandsuch a wind load, and so on. To avoid error, the applied
mathematician relies not on abstract formalism but rather on a thorough
mental grasp of the essential physical features of the phenomenon he is
trying to model. An equation like
@@ 110,17 +250,22 @@ trying to model. An equation like
\]
may make perfect sense without further explanation to an applied
mathematical readership, depending on the physical context in which the
equation is introduced. Geometrical argumentproof by sketchis
not only trusted but treasured. Abstract definitions are wanted only
insofar as they smooth the analysis of the particular physical problem
at hand; such definitions are seldom promoted for their own sakes.
+equation is introduced. Nonaxiomatic geometrical argumentproof by
+sketchis not only trusted but treasured. Abstract definitions are
+wanted only insofar as they smooth the analysis of the particular
+physical problem at hand; such definitions are seldom promoted for their
+own sakes.
\index{Heaviside, Oliver (18501925)}
\index{Hilbert, David (18621943)}
\index{Courant, Richard (18881972)}
+\index{Pinter, Charles~C. (1938)}
+\index{Shilov, Georgi~E. (191775)}
+\index{perspective, CourantHilbertShilov}
+\index{CourantHilbertShilov perspective, the}
\index{physicist}
The irascible Oliver Heaviside, responsible for the important applied
mathematical technique of phasor analysis, once said,
+The irascible Oliver Heaviside (18501925), responsible for the
+important applied mathematical technique of phasor analysis, once said,
\begin{quote}
It is shocking that young people should be addling their brains over
mere logical subtleties, trying to understand the proof of one obvious
@@ 130,56 +275,531 @@ Exaggeration, perhaps, but from the appl
\linebreak % bad break
Heaviside nevertheless had a point. The professional mathematicians
\linebreak % bad break
Richard Courant and David Hilbert put it more soberly in~1924 when they
wrote,
+Richard Courant (1888--1972) and David Hilbert (1862--1943) put it more
+soberly in~1924 when they wrote,
\begin{quote}
 Since the seventeenth century, physical intuition has served as a vital
 source for mathematical problems and methods. Recent trends and
+ Since the seventeenth century, physical intuition has served as a
+ vital source for mathematical problems and methods. Recent trends and
fashions have, however, weakened the connection between mathematics
and physics; mathematicians, turning away from the roots of
mathematics in intuition, have concentrated on refinement and
emphasized the postulational side of mathematics, and at times have
overlooked the unity of their science with physics and other fields.
In many cases, physicists have ceased to appreciate the attitudes of
 mathematicians.\ \cite[Preface]{Courant/Hilbert}
+ mathematicians.~\cite[Preface]{Courant/Hilbert}
\end{quote}
Although the present book treats ``the attitudes of mathematicians''
with greater deference than some of the unnamed~1924 physicists
might have done, still, Courant and Hilbert could have been speaking for
the engineers and other applied mathematicians of our own day as well as
for the physicists of theirs. To the applied mathematician, the
mathematics is not principally meant to be developed and appreciated for
its own sake; it is meant to be \emph{used.} This book adopts the
CourantHilbert perspective.
%\footnote{
% Section~\ref{taylor:310} and Ch.~\ref{taylor}'s
% footnote~\ref{taylor:310:fn1} pose a particularly typical instance of
% the distinction.
%}
+And what are these ``attitudes'' of which Courant and Hilbert speak?
+To the mathematician Charles~C.\ Pinter, they are not attitudes, but
+principles:
+\begin{quote}
+ Since the middle of the nineteenth century, the axiomatic method has
+ been accepted as the only correct way of organizing mathematical
+ knowledge.~\cite[chapter~1]{Pinter}
+\end{quote}
+But accepted by whom? The mathematician Georgi~E.\ Shilov, less
+enthusiastic than Pinter for the axiomatic method, is not so sure:
+\begin{quote}
+ There are other approaches to the theory \ldots\ where things I take
+ as axioms are proved\mdots Both treatments have a key deficiency,
+ namely the absence of a proof of the compatibility of the axioms\mdots
+ The whole question, far from being a mere technicality, involves the
+ very foundations of mathematical thought. In any event, this being
+ the case, it is not very important where one starts a general
+ treatment\mdotx~\cite[Preface]{Shilov}
+\end{quote}
+Although the present book responds to ``the attitudes of mathematicians''
+with greater deference than some of Courant's and Hilbert's unnamed~1924
+physicists might have done, though Shilov himself admittedly is more
+rigorous than his own, seemingly casual words let on, still, Courant and
+Hilbert could have been speaking for the engineers and other applied
+mathematicians of our own day as well as for the physicists of theirs;
+and still, Shilov like Heaviside has a point. To the applied
+mathematician, the mathematics is not principally meant to be developed
+and appreciated for its own sake; it is meant to be \emph{used.} This
+book adopts the Courant-Hilbert-Shilov perspective.%
+\footnote{\label{intro:284.2:fn1}%
+ It is acknowledged that Hilbert at other times took what seems to be
+ the opposite perspective; and that there remains the historically
+ important matter of what the early twentieth century knew as
+ ``Hilbert's program,'' a subject this book will not address. Hilbert
+ however, perhaps the greatest of the mathematical
+ formalists~\cite[chapter~1]{Feferman}, was a broad thinker, able to survey
+ philosophical questions seriously from each of multiple points of
+ view. What Hilbert's ultimate opinion might have been, and whether
+ the words quoted more nearly represent Hilbert's own conviction or his
+ student Courant's, and how the views of either had evolved before or
+ would evolve after, are biographical questions this book will not try
+ to treat. The book would accept the particular passage recited rather
+ on its face.
+
+ Regarding Shilov, his formal mathematical rigor is easy and fluent,
+ and his book~\cite{Shilov} makes a good read even for an engineer.
+ The book you now hold however adopts not Shilov's methods---for one
+ can read Shilov's book for those---but only his perspective, as
+ expressed in the passage recited.
+
+ For a taste of Shilov's actual methods, try this, the very first proof
+ in his book: ``Theorem. The system [of real numbers] contains a
+ unique zero element. Proof. Suppose [that the system] contains two
+ zero elements~$0_1$ and~$0_2$. Then it follows from [the axioms of
+ commutation and identity] that $0_2=0_2+0_1=0_1+0_2=0_1$\@.
+ Q.E.D.''~\cite[\S~1.31]{Shilov}.
+}
+
+But why? Is the Courant-Hilbert-Shilov perspective really necessary, after
+all? If unnecessary, is it desirable? Indeed, since the book you are
+reading is a book of derivations, would it not be a more elegant book if
+it began from the most primitive, pure mathematical fundamentals, and
+proceeded to applications thence?
+
+If Heaviside was so irascible, then wasn't he just plain wrong?
+
+\subsection{Mathematical Platonism}
+\label{intro:284.4}
+\index{Platonism}
+\index{mathematical Platonism}
+\index{formalism}
+\index{mathematical formalism}
+
+\index{Bell, John~L.\ (1945--)}
+\index{Kort\'e, Herbert}
+\index{Weyl, Hermann (1885--1955)}
+\index{knee}
+To appreciate the depth of the trouble in which the applied
+mathematician may soon find himself mired, should he too casually reject
+the Courant-Hilbert-Shilov perspective, consider John~L.\ Bell's and Herbert
+Kort\'e's difficult anecdote regarding Hilbert's brilliant student and
+later cordial rival, Hermann Weyl (1885--1955):
+\begin{quote}
+ \index{pencil}
+ \index{Sirius}
+ Weyl \ldots\ considers the experience of seeing a pencil lying on a
+ table before him throughout a certain time interval. The position of
+ the pencil during this interval may be taken as a function of the
+ time, and Weyl takes it as a fact of observation that during the time
+ interval in question this function is continuous and that its values
+ fall within a definite range. And so, he says, ``This observation
+ entitles me to assert that during a certain period this pencil was on
+ the table; and even if my right to do so is not absolute, it is
+ nevertheless reasonable and wellgrounded. It is obviously absurd to
+ suppose that this right can be undermined by `an expansion of our
+ principles of definition'---as if new moments of time, overlooked by
+ my intuition, could be added to this interval; moments in which the
+ pencil was, perhaps, in the vicinity of Sirius or who knows
+ where\mdotx''~\cite{Bell/Korte}
+\end{quote}
+In Weyl's gentle irony lies a significant point, maybe, yet how should
+applied mathematics advocate such a point, or dispute it? Is applied
+mathematics even suited to such debates? What of engineering questions
+of a more mundane cast---such as, for instance, how likely Weyl's pencil
+might be to roll off if his knee bumped the table? After all, there is
+a pencil, and there is a table, and Sirius seems to have little
+immediately to do with either; and whether the pencil rolls off might
+concern us, irrespective of any particular substruction pure mathematics
+sought to build to support the technique we had used to model and
+analyze the case. Indeed, Weyl himself---a great mathematician, a
+consistent friend of the engineer and of the scientist, and a wise
+man---warns,
+\begin{quote}
+ \index{foundations of mathematics}
+ The ultimate foundations and the ultimate meaning of mathematics
+ remain an open problem; we do not know in what direction it will find
+ its solution, nor even whether a final objective answer can be
+ expected at all.~\cite{Weyl:1950}
+\end{quote}
+Just so. Fascinating as it is, we shall not try to answer Weyl's deep
+question of mathematical philosophy here.
+
+\index{Sunday}
+\index{weekday}
+\index{Hersh, Reuben (1927--)}
+To the extent to which a professional mathematician classified the book
+at all, he might properly call the book you are reading a
+\emph{Platonist} work. Now, that is a fine adjective, is it not? It is
+most subtle, most lofty; and perhaps the author had better not be too
+eager to adorn his own work with it; yet let us listen to what the
+professional mathematician Reuben Hersh has to say:
+\begin{quote}
+ Most writers on the subject seem to agree that the typical ``working
+ mathematician'' is a Platonist on weekdays and a formalist on Sundays.
+ That is, when he is doing mathematics, he is convinced that he is
+ dealing with an objective reality whose properties he is attempting to
+ determine. But then, when challenged to give a philosophical account
+ of this reality, he finds it easiest to pretend that he does not
+ believe in it after all\mdots
+
+ The basis for Platonism is the awareness we all have that the problems
+ and concepts of mathematics exist independently of us as individuals.
+ The zeroes of the zeta function%
+ \footnote{%
+ This book stops short of treating Riemann's zeta function, a rather
+ interesting special function that however seems to be of even
+ greater interest in pure than in applied mathematics. If you want
+ to know, the zeta function is $\zeta(z) \equiv \sum_{k=1}^\infty
+ 1/k^z.$~\cite[chapter~10]{Spiegel}
+ }
+ are where they are, regardless of what I may think or know on the
+ % bad break
+ sub\-ject\mdotx%
+ \footnote{%
+ The generous Hersh, who has thus so empathetically sketched
+ mathematical Platonism, goes on tactfully to confess that he
+ believes mathematical Platonism a myth, and to report (admittedly
+ probably correctly) that most professional mathematicians also
+ believe as he does on this point. The present writer however
+ accepts the sketch, appreciates the tact, and believes \emph{in} the
+ myth, for the reasons outlined in this introduction among others.
+ }%
+ ~\cite{Hersh}
+\end{quote}
+Your author inclines to Platonism%
+\footnote{\cite[chapter~2]{Feser}}
+on Sundays, too, yet even readers who do not so incline
+should find the book nonetheless edifying the other six days of the
+week at least.
+
+\index{Tymoczko, Thomas (1943--1996)}
+\index{Frege, Friedrich Ludwig Gottlob (1848--1925)}
+\index{Wittgenstein, Ludwig (1889--1951)}
+\index{No\"e, Alva (1964--)}
+\index{Bertrand Russell (18721970)}
+\index{mathematician!applied, chief interest of}
+\index{ontology}
+\index{epistemology}
+Hersh goes on with tact and feeling at some length in the article from
+which his words are quoted, and it is fair to say that he probably would
+not endorse the present writer's approach in every respect.
+Notwithstanding, the philosopher Thomas Tymoczko---who unlike Hersh but
+like the present writer might fairly be described as a Platonist%
+\footnote{%
+ Tymoczko's preferred term is not ``Platonist'' but
+ ``quasi\"empiricist,'' a word Tymoczko lends a subtly different
+ emphasis.~\cite{Tymoczko:vat}
+}%
+---later writes of Hersh's article,
+\begin{quote}
+ \mbox{\ldots\ }In so far as [the working philosophy of the professional
+ mathematician] is restricted to the usual mix of foundational ideas,
+ Hersh charges, [this philosophy] is generally inconsistent, always
+ irrelevant and sometimes harmful in practice and teaching.
+
+ \mbox{\ldots\ }Hersh suggests [that] the best explanation of foundational
+ concerns is in terms of the historical development of
+ mathemat\-ics\mdots [H]e isolates some of the basic presuppositions of
+ foundation studies: ``that mathematics must be provided with an
+ absolutely reliable foundation'' and ``that mathematics must be a
+ source of indubitable truth.'' Hersh's point is that it is one thing
+ to accept the assumption when, like Frege, Russell or Hilbert, we feel
+ that the foundation is nearly attained. But it is quite another to go
+ on accepting it, to go on letting it shape our philosophy, \emph{long
+ after}%
+ \footnote{%
+ Emphasis in the original.
+ }
+ we've abandoned any hope of attaining that goal\mdotx~\cite{Tymoczko}
+\end{quote}
+The applied mathematician who rejects the Courant-Hilbert-Shilov perspective
+and inserts himself into \emph{this} debate%
+\footnote{%
+ See also, in no particular order,
+ \cite{Carson/Huber}\linebreak[0]%
+ \cite{Courant/Hilbert}\linebreak[0]%
+ \cite{Feferman}\linebreak[0]%
+ \cite{Frege}\linebreak[0]%
+ \cite{Hamming}\linebreak[0]%
+ \cite{Hardy}\linebreak[0]%
+ \cite{Hersh}\linebreak[0]%
+ \cite{Knopp}\linebreak[0]%
+ \cite{Krader}\linebreak[0]%
+ \cite{Lebedev}\linebreak[0]%
+ \cite{Pinter}\linebreak[0]%
+ \cite{Shilov}\linebreak[0]%
+ \cite{Sieg/Schlimm}\linebreak[0]%
+ \cite{Toader}\linebreak[0]%
+ \cite{Tymoczko}\linebreak[0]%
+ \cite{Tymoczko:vat}\linebreak[0]%
+ \cite{Watson}\linebreak[0]%
+ \cite{Weierstrass:werke}\linebreak[0]%
+ \cite{Weiner}\linebreak[0]%
+ \cite{Weyl:1950}.
+}
+may live to regret it. As the mathematician Ludwig Wittgenstein
+illustrates,
+\begin{quote}
+ [Bertrand] Russell [co\"author of \emph{Principia
+ Mathematica} and archexponent of one of the chief schools of pure
+ mathematical
+ \linebreak % bad break
+ thought]\cite{Whitehead/Russell} gives us a calculus
+ here. How this calculus of Russell's is to be \emph{extended} you
+ wouldn't know for your life, unless you had ordinary arithmetic in
+ your bones. Russell doesn't even prove $10\times 100 = 1000.$
+
+ What you're doing is constantly taking for granted a particular
+ interpretation. You have mathematics and you have Russell; you think
+ mathematics is all right, and Russell is all right---more so; but
+ isn't this a put-up job? That you can correlate them in a way, is
+ clear---not that one throws light on the
+ other.~\cite[lecture~XVI]{Wittgensteinlectures}
+\end{quote}
+The book you hold will not correlate them but will (except for some
+nonessential side commentary) confine its attention to the applied
+mathematician's honorable, chief interest---which is to describe,
+quantify, model, plan and analyze particular physical phenomena of
+concern; and to understand topically why the specific mathematical
+techniques brought to bear on such phenomena should prosper; but not to
+place these techniques in the context of a larger ontological or
+epistemological dispute---a dispute that, though important in itself,
+does not directly move the applied mathematician's interest one way or
+the other.
+
+Indeed, as the philosopher Alva No\"e observes,
+\begin{quote}
+ [T]here is no stable or deeply understood account of how these
+ autonomous domains fit together. The fact that we are getting along
+ with business as if there were such an account is, well, a political
+ or sociological fact about us that should do little to
+ reassure.~\cite{Noe}
+\end{quote}
+No\"e is writing here about the nature of consciousness but could as
+well, with equal justice and for similar reasons, be writing about our
+problem of mathematical foundations.
+
+To conclude this subsection's glance upon mathematical Platonism
+we may well quote Plato himself:
+\begin{quote}
+ \index{arithmetic}
+ \index{geometry}
+ Then this is a kind of knowledge which legislation may fitly
+ prescribe; and we must endeavour to persuade those who are to be the
+ principal men of our State to go and learn arithmetic, not as
+ amateurs, but they must carry on the study until they see the nature
+ of numbers with the mind only; nor again, like merchants or
+ retail-traders, with a view to buying or selling, but for the sake of
+ their military use, and of the soul herself; and because this will be
+ the easiest way for her to pass from becoming to truth and being\mdots
+ I must add how charming the science is!\,\ldots\quad
+ [A]{r}{i}{t}{h}{m}{e}{t}{i}{c} has a very great and elevating effect,
+ compelling the soul to reason about abstract number, and rebelling
+ against the introduction of visible or tangible objects into the
+ argument\mdots [T]his knowledge may be truly called necessary,
+ necessitating as it clearly does the use of the pure intelligence in
+ the attainment of pure truth\mdots
+
+ And next, shall we enquire whether the kindred science [of geometry]
+ also concerns us?\,\ldots\ [T]he question relates \ldots\ to the
+ greater and more advanced part of geometry---whether that
+ % bad break
+ \linebreak
+ tends in any degree to make more easy the vision of the idea of good;
+ and thither, as I was saying, all things tend which compel the soul to
+ turn her gaze towards that place, where is the full perfection of
+ being, which she ought, by all means, to behold\mdots [T]he knowledge
+ at which geometry aims is knowledge of the eternal, and not of aught
+ perishing and transient. [G]{e}{o}{m}{e}{t}{r}{y} will draw the soul
+ towards truth, and create the spirit of philosophy, and raise up that
+ which is now unhappily allowed to fall down.~\cite[book~VII]{Plato}.
+\end{quote}
+\index{indulgence}
+\index{indictment}
+\index{vanity}
+The vanity of modern man (I do not say, modern mathematician) may affect
+to smile upon the ancient; but his vanity less indulges the
+ancient, who hardly needs indulgence, than indicts the modern.
+
+\index{4th century~B.C.}
+Plato is not less right today than he was in the fourth century~B.C.
+
+\subsection{Methods, notations, propositions and premises}
+\label{intro:284.45}
+\index{method}
+\index{notation}
+\index{proposition}
+\index{premise}
+
+% bad break in the next index entry
+\index{Zermelo-Fraenkel and Choice set theory (ZFC)}
+\index{ZFC}
+\index{set theory}
+\index{Zermelo, Ernst (1871--1953)}
+\index{Fraenkel, Abraham (1891--1965)}
+\index{Cantor, Georg (1845--1918)}
+\index{Weyl, Hermann (1885--1955)}
+The book purposely overlooks, and thus omits, several of the mathematics
+profession's pure methods and some of its more recondite notations,
+unsuited to (or at any rate unintended for) applied use. Most notably,
+the book overlooks and omits the profound methods and notations of the
+Zermelo-Fraenkel and Choice set
+theory~(ZFC)\footnote{%
+ \index{Bertrand Russell (18721970)}%
+ See \cite{Wilson}\cite{Dasgupta}. The~ZFC is a near descendant of
+ the work of the aforementioned Bertrand Russell and, before Russell,
+ of Georg Cantor.%
+}
+and its consequents. The book inclines rather toward Weyl's view:
+\begin{quote}
+ [A settheoretic approach] contradicts the essence of the continuum,
+ which by its very nature cannot be battered into a set of separated
+ elements. Not the relationship of an element to a set, but that of a
+ part to a whole should serve as the basis\mdots~\cite{Scholz}
+\end{quote}
+The years have brought many mathematical developments since Weyl wrote
+these words in~1925 but the present author still tends to think as
+Weyl does.
+
+\index{intuitive proposition}
+\index{unsupported proposition}
+\index{premise}
+\index{square!tilted}
+\index{square!rotated}
+\index{continuum}
+\index{mean-value theorem}
+\index{identifying property}
+\index{discontinuity}
+This does not of course mean that the author or this book intends to peddle
+nonintuitive mathematical propositions, unsupported, as fact. The book
+could hardly call itself a book of derivations if it did. What it does
+mean is that the book can assume without further foundation or
+explication---and without extremes of formal definition---for example,
+that a rotated square remains square; that a number like $\sqrt{3}/2$
+occupies a definite spot in a comprehensible
+continuum;\footnote{\cite{Weyl:1918}}
+that no numbers in the continuum other than~0 and~1 enjoy these two
+numbers' respective identifying properties;%
+\footnote{See footnote~\ref{intro:284.2:fn1}.}
+that one can impute an unobserved population against an observed
+sample;\footnote{\cite[chapter~11]{Bulmer}}
+that a continuous, differentiable,\footnote{%
+ Are burdensome adjectives like ``continuous, differentiable''
+ necessary? Are they helpful? Do they sufficiently illuminate
+ one's understanding that they should be suffered to clutter the text
+ so?
+
+ Maybe they are indeed necessary. Maybe they are even helpful but,
+ even if so, does the discerning reader want them? Does the
+ \emph{nondiscerning} reader want them? If neither, then whom do
+ they serve? If the only answer is that they serve the investigator
+ of foundational concerns, then what does this tell us about the
+ wisdom of injecting foundational concerns into applications?
+
+ Regarding continuity and differentiability: the applicationist is
+ inclined to wait until a specific problem arises in which a
+ particular, concrete discontinuity or undifferentiability looms, when
+ he will \emph{work around} the discontinuity or undifferentiability as
+ needed---whether by handling it as a parameterized limit or by
+ addressing it in some other convenient way. None of this has much to
+ do with foundations.
+
+ To treat every such point as a fundamental challenge to one's
+ principles of definition just takes too long and anyway does not much
+ help. The scientist or engineer wants to save that time to wrestle
+ with physical materialities.%
+}
+real function's average slope over a real interval equals the
+function's instantaneous slope at at least one
+point;\footnote{\cite[\S~7.4]{Shilov}}
+and so on (and if you did not understand all of that, that is
+all right, for to explain such things is what the rest of the
+book's chapters are for). This is what the book's approach means.
+
+\index{premise!implied, unstated}
+The Pythagorean theorem at the chapter's head examples the approach.
+The theorem is proved briefly, without excessive abstraction, working
+upon the \emph{implied, unstated premise} that a rotated square remains
+square.
+
+If you can accept the premise, then you can accept the proof. If you
+can accept the \emph{kind} of premise, then you can accept the book.
+
+\subsection{Rigor to forestall error}
+\label{intro:284.5}
+\index{rigor!to forestall error}
+\index{error!forestalled by rigor}
+
+\index{ontology}
+\index{epistemology}
+Aside from foundational concerns---whatever the ontological or
+epistemological merits of formal mathematical rigor may be---some will
+laud such rigor too for forestalling error, even in applications.%
+\footnote{%
+ To cite a specific source here might be more captious than helpful,
+ so the reader is free to disregard the assertion as unsourced.
+ However, if you have heard the argument---perhaps in conjunction
+ with the example of a conditionally convergent sum or the
+ like---then the writer has heard it, too.
+}
+Does the rigor deserve this praise? Well, perhaps it does. Still,
+though the writer would not deny the praise's decorum in every instance,
+he does judge such praise to have been oversold by a few.
+
+\index{Hamming, Richard~W.\ (19151998)}
+Notwithstanding, formal mathematical rigor serves two, distinct
+programs. On the one hand, it embodies the pure mathematician's noble
+attempt to establish, illuminate or discover the means and meaning of
+\emph{truth.}\footnote{%
+ \index{Aristotle (384322~B.C.)}%
+ \index{Aquinas, St.~Thomas (12251274)}%
+ \index{G\"odel, Kurt Friedrich (19061978)}%
+ As courtesy to the reader, I should confess my own opinion in the
+ matter, which is that it is probably, fundamentally not given to
+ mortal man to lay bare the ultimate foundations of truth, as it is not
+ given to the beasts, say, to grasp mathematics. Like the beasts, we
+ too operate within the ontological constraints of our nature.
+
+ That this should be my opinion will not perhaps surprise readers who
+ have read the preface and the chapter to this point. As far as I
+ know, Aristotle, Aquinas and G\"odel were right.
+ %(Some sophomore will feel compelled to interject that Aristotle was
+ %misinformed regarding astronomy, but that obviously isn't my point.)
+ However, be that as it may, my opinion in the matter is not very
+ relevant to this book's purpose. I do not peddle it but mention it
+ only to preclude misunderstanding regarding the sympathies and biases,
+ such as they are, of the book's author. THB%
+}
+On the other hand, it cross-checks intuitive \emph{logic} in a
+nonintuitive way. Neither hand holds absolute metaphysical guarantees;
+yet, even if the sole use of formal mathematical rigor were to draw the
+mathematician's attention systematically to certain species of
+questionable reasoning for further examination, such rigor would merit
+the applicationist's respect. As the mathematician Richard~W.\ Hamming
+writes,
+\begin{quote}
+ When you yourself are responsible for some new application of
+ mathematics in your chosen field, then your reputation, possibly
+ millions of dollars and long delays in the work, and possibly even
+ human lives, may depend on the results you predict. It is then that
+ the \emph{need} for mathematical rigor will become painfully obvious
+ to you. Before this time, mathematical rigor will often seem to be
+ needless pedantry\mdots~\cite[\S~1.6]{Hamming}
+\end{quote}
+Sobering words. Nevertheless, Hamming's point is not a point this book
+will pursue.
\index{style}
The introduction you are now reading is not the right venue for an essay
on why both kinds of mathematicsapplied and professional (or
pure)are needed. Each kind has its place; and although it is a
stylistic error to mix the two indiscriminately, clearly the two have
much to do with one another. However this may be, this book is a book
of derivations of applied mathematics. The derivations here proceed by
a purely applied approach.
+The introduction you are reading is not the right venue for a full essay
+on why both kinds of mathematics, applied and pure, are needed at any
+rate. Each kind has its place; and though it is a stylistic error to
+mix the two indiscriminately, clearly the two have much to do with one
+another. However that may be, this book is a book of derivations of
+applied mathematics. The derivations here proceed by an applied
+approach.
\subsection{Mathematical extension}
+\section{Mathematical extension}
\label{intro:284.1}
\index{extension}
Profound results in mathematics are occasionally achieved simply by
extending results already known. For example, negative integers and
+Profound results in mathematics occasionally are achieved simply by
+extending results already known. For example, the negative integers and
their properties can be discovered by counting backward3, 2, 1,
0then asking what follows (precedes?) 0 in the countdown and what
+0---and then asking what follows (precedes?)\ 0 in the countdown and what
properties this new, negative integer must have to interact smoothly
with the already known positives. The astonishing Euler's formula
(\S~\ref{cexp:230}) is discovered by a similar but more sophisticated
mathematical extension.
More often, however, the results achieved by extension are unsurprising
+\index{European Renaissance}
+\index{Renaissance}
+More often however, the results achieved by extension are unsurprising
and not very interesting in themselves. Such extended results are the
faithful servants of mathematical rigor. Consider for example the
triangle on the left of Fig.~\ref{intro:284:fig}.
@@ 233,7 +853,7 @@ This triangle is evidently composed of t
A_1&=&\frac{b_1h}{2}, \\
A_2&=&\frac{b_2h}{2}
\eqb
(each right triangle is exactly half a rectangle). Hence the main
+(each right triangle is exactly half a rectangle). Hence, the main
triangle's area is
\[
A = A_1 + A_2 = \frac{(b_1+b_2)h}{2} = \frac{bh}{2}.
@@ 241,108 +861,111 @@ triangle's area is
Very well. What about the triangle on the right? Its~$b_1$ is not
shown on the figure, and what is that~$b_2$, anyway? Answer: the
triangle is composed of the \emph{difference} of two right triangles,
with~$b_1$ the base of the larger, overall one: $b_1=b+(-b_2)$. The~$b_2$
is negative because the sense of the small right triangle's area
in the proof is negative: the small area is subtracted from the large
rather than added. By extension on this basis, the main triangle's area
is again seen to be $A = bh/2$. The proof is exactly the same. In
fact, once the central idea of adding two right triangles is grasped,
the extension is really rather obvious---too obvious to be allowed to
burden such a book as this.
+with~$b_1$ the base of the larger, overall one: $b_1=b+(-b_2)$.
+The~$b_2$ is negative (whereby~$-b_2$ is positive) because the sense of
+the small right triangle's area in the proof is negative: the small area
+is subtracted from the large rather than added. By extension on this
+basis, the main triangle's area is seen again to be $A = bh/2$. The
+proof is the same. In fact, once the central idea of adding two right
+triangles is grasped, the extension is really rather obvious---too
+obvious to be allowed to burden such a book as this.
+%\footnote{%
+% One is given to understand that ``[n]{e}{g}{a}{t}{i}{v}{e} numbers
+% were a fruit of the European Renaissance''~\cite{Derbyshire}\@.
+% Mathematical life before negative numbers is not easy to imagine.
+% If one were unacquainted with the negative, then would the triangles'
+% extension be nonobvious? That it truly would be seems hard to
+% credit.%
+%}
\index{edge case}
Excepting the uncommon cases where extension reveals something
+Excepting the uncommon cases in which extension reveals something
interesting or new, this book generally leaves the mere extension of
proofsincluding the validation of edge cases and overtheedge
casesas an exercise to the interested reader.
% 
\section{Complex numbers and complex variables}
+\section{Deduction, adduction and the complex variable}
\label{intro:310}
\index{complex number}
\index{number!complex}
\index{complex variable}
\index{variable!complex}
More than a mastery of mere logical details, it is an holistic view of
the mathematics and of its use in the modeling of physical systems which
is the mark of the applied mathematician. A \emph{feel} for the math is
the great thing. Formal definitions, axioms, symbolic algebras and the
like, though often useful, are felt to be secondary. The book's rapidly
staged development of complex numbers and complex variables is planned
on this sensibility.
+\index{adduction}
+\index{deduction}
+The English language derives from the Latin a nice counterpart to the
+transitive verb \emph{to deduce,} a verb whose classical root means ``to
+lead away.'' The counterpart is \emph{to adduce,} ``to lead toward.''
+Adduction, as the word is here used, subtly reverses the sense of
+deduction: it establishes premises from necessary conclusions rather
+than the other way around.%
+\footnote{%
+ As \S~\ref{intro:284.2} has discussed, pure mathematics can
+ occasionally, implicitly illuminate its \emph{axioms} in the light of
+ necessary conclusions. Since axioms are by definition a restricted
+ kind of premise, one might arguably regard the illumination named as
+ an elevated species of adduction. However, that is not what this
+ section is about.
+}
+
+Applied mathematics sometimes prefers adduction to deduction.
+Attention is drawn to this preference because the preference governs the
+book's approach to the \emph{complex number} and the \emph{complex
+variable.} The book will speak much soon of the complex number and the
+complex variable, but we mention them now for the following reason.
+
+An overall view of relevant analytical methods---including complex
+% bad break
+meth\-ods---and
+of their use in the modeling of physical systems, marks the applied
+mathematician more than does the abstract mastery of any received
+program of pure logic.%
+\footnote{%
+ Pure logic is a worthy endeavor, though whether such
+ logic is more properly \emph{received} or rather \emph{illuminated} is
+ a matter of long dispute. Besides Hilbert, of whom we have already
+ spoken, see also Frege~\cite{Frege} and the commentary~\cite{Weiner}.
+}
+A \emph{feel} for the math is the great thing.
+Formal definitions, axioms, symbolic algebras and the like, though often
+useful, are esteemed less as primary objects than as secondary supports.
+The book's adductive, rapidly staged development of the complex number
+and the complex variable is planned on this sensibility.
Sections~\ref{alggeo:225}, \ref{trig:278}, \ref{trig:280},
\ref{drvtv:230.35}, \ref{drvtv:240}, \ref{noth:320}, \ref{inttx:250}
and~\ref{inttx:260.50}, plus all of Chs.~\ref{cexp}
and~\ref{taylor}, constitute the book's principal stages of complex
development. In these sections and throughout the book, the reader
comes to appreciate that most mathematical properties which apply for
real numbers apply equally for complex, that few properties concern real
numbers alone.

% **********************************
% Do not forget that purec.tex quotes the following paragraph. If you
% change it here, change it there, too.
% **********************************

Pure mathematics develops an abstract theory of the complex
variable.%
\footnote{\cite{Arnold:1997}\cite{Fisher}\cite{Spiegel}\cite{Hildebrand}}
The abstract theory is quite beautiful. However, its arc takes off too
late and flies too far from applications for such a book as this. Less
beautiful but more practical paths to the topic exist;%
\footnote{
 See Ch.~\ref{taylor}'s footnote~\ref{taylor:320:fn10}.
+and~\ref{inttx:260}, plus all of chapters~\ref{cexp} and~\ref{taylor},
+constitute the book's principal stages of complex development. In these
+sections and throughout the book, the reader comes to appreciate that
+most mathematical properties which apply for real numbers apply equally
+for complex, that few properties concern real numbers alone.
+
+% diagn: The next paragraph is partially quoted in purec.tex.
+
+Pure mathematics develops its own, beautiful, abstract theory of the
+complex variable,%
+\footnote{\cite{Arnold:1997}\cite{Shilov}\cite{Fisher}\cite{Spiegel}\cite{Hildebrand}}
+a theory whose
+arc regrettably takes off too late and flies too far from
+applications for such a book as this.
+Less beautiful, less abstract, more practical, nonaxiomatic paths to
+the topic exist,%
+\footnote{%
+ See chapter~\ref{taylor}'s footnote~\ref{taylor:320:fn10}.
}
this book leads the reader along one of these.
+and this book leads the reader along one of these.
For supplemental reference, a bare sketch of the abstract theory of the
complex variable is found in Appendix~\ref{purec}.

%% 
%
%\section{To undergraduate readers}
%\label{intro:320}
%
%Technical books written for a postgraduate readership tend to present
%their mathematics tersely, as this book does in most places. Though
%the book is not intended solely for postgraduates, it does leave
%significant responsibility on the reader's shoulders. A topic a
%lowerdivision college text would develop in twenty pagesor an
%upperdivision text, in fivethis book develops in two. Readers
%unused to such density may find the book hard to read at first.
%
%Karl Hahn has written, ``[A]s far as the \ldots complaint \ldots that
%math is hard, I can't help that. It is hard.'' Hahn is right.
%
%To spend twenty pages to develop two pages' worth of mathematics however
%does not make the math much easier, however; it mostly just makes the
%pages turn faster. What the extra pages do is to guide the student
%\emph{who has not yet fully learned how to learn.} Such a book as the
%one you are now reading can hardly afford the pages.
%
%This book omits no essential theory by design, but neither does it bring
%much in the way of application, reflection, practice or exercise, which
%of course are at least as necessary as the theory is. Postgraduate
%technical books mostly expect their readers to provide their own
%application, reflection, practice and exercise. This book expects that,
%too.
%
%If you have never tried to read a book of this kind, then you might,
%starting now. It is a useful skill. Allow plenty of time per page.
%
% 
+complex variable is found in appendix~\ref{purec}\@.
\section{On the text}
\label{intro:290}
The book gives numerals in hexadecimal. It denotes variables in Greek
letters as well as Roman. Readers unfamiliar with the hexadecimal
notation will find a brief orientation thereto in Appendix~\ref{hex}.
+notation will find a brief orientation thereto in appendix~\ref{hex}\@.
Readers unfamiliar with the Greek alphabet will find it in
Appendix~\ref{greek}.
+appendix~\ref{greek}\@.
\index{GNU General Public License}
\index{General Public License, GNU}
@@ 355,41 +978,24 @@ Licensed to the public under the GNU Gen
version~2, this book meets the Debian Free Software
Guidelines~\cite{DFSG}.
%\index{citation}
%If you cite an equation, section, chapter, figure or other item from
%this book, it is recommended that you include in your citation the
%book's precise
%draft
%date as given on the title page. The reason is
%that equation numbers, chapter numbers and the like are numbered
%automatically by the \LaTeX\ typesetting software: such numbers can
%change arbitrarily from draft to draft. If an exemplary citation helps,
%see~\cite{self} in the bibliography.

A book of mathematical derivations by its nature can tend to make dry,
even gray reading. Delineations of black and white become the book's
duty. Mathematics however should serve the demands not only of
deduction but equally of insight, by the latter of which alone
mathematics derives either feeling or use. Yet, though \emph{this} book
does tryat some risk to strict consistency of toneto add color in
+% Well, this paragraph is a flight of rhetoric, is it not? Does
+% it work? I am not sure. However, I like it enough that I will keep
+% it for the indefinite (not unlikely permanent) time being. THB
+By its nature, a book of mathematical derivations can make strait,
+colorless reading. To delineate logic as it were in black and white is
+the book's duty. What then to tint? Is naught to be warm nor cool,
+naught ruddy nor blue? Though mathematics at its best should serve the
+demands not only of deduction but equally of insight, by the latter of
+which alone mathematics derives either feeling or use; though
+this book does occasionally
+try (at some risk to strict consistency of tone) to add color in
suitable shades, to strike an appropriately lively balance between the
opposite demands of logical progress and literary relief; nonetheless,
neither every sequence of equations nor every conjunction of figures is
susceptible to an apparent hue the writer can openly paint upon it, but
+susceptible to an apparent hue the writer can openly paint upon itbut
only to that abeyant hue, that luster which reveals or reflects the fire
of the reader's own mathematical imagination, which color otherwise
remains unobserved. The book's subject and purpose thus restrict its
overt style.

% The following paragraph comes across as scolding.
%
%The reader unfamiliar with such a style
%% (a relatively accessibly species of what publishers in the writer's
%% country call a ``graduatelevel'' style)
%may prefer to begin reading with a pencil and notebook in hand; for few
%practices will help more to confirm understanding, or to discover the
%latent train of thought in technical proseindeed, even to perceive
%the aforementioned abeyant huethan taking notes.
+of the reader's own mathematical imagination, which color remains
+otherwise unobserved.
The book begins by developing the calculus of a single variable.
diff pruN 0.53.201204142/tex/inttx.tex 0.56.20180123.12/tex/inttx.tex
 0.53.201204142/tex/inttx.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/inttx.tex 20180123 20:35:39.000000000 +0000
@@ 39,9 +39,20 @@ For instance,
\int_1^x \frac{1}{\tau} \,d\tau = \ln\tau|_1^x = \ln x.
\]
One merely looks at the integrand $1/\tau$, recognizing it to be the
derivative of $\ln\tau$, then directly writes down the solution $\ln
+derivative of $\ln\tau$, and then directly writes down the solution $\ln
\tau_1^x$. Refer to \S~\ref{integ:230}.
+{
+ \nc\tta[1]{\ensuremath
+ \int_{1/2}^x \frac{1}{\sqrt{1\tau^2}} \,d\tau
+ = \arcsin \tau|_{1/2}^x = \arcsin x - \frac{2\pi}{\mbox{#1}}%
+ }
+ Again for instance, \[\tta{0xC},\]
+ or, if you prefer decimal notation, \[\tta{12}.\]
+ Refer to Table~\ref{trig:hourtable} and, more importantly, to
+ Table~\ref{cexp:drvi}.
+}
+
The technique by itself is pretty limited. However, the frequent object
of other integration techniques is to transform an integral into a form
to which this basic technique can be applied.
@@ 76,7 +87,7 @@ numbers.
Consider the integral
\[
 S = \int_{x_1}^{x_2}\frac{x\,dx}{1+x^2}.
+ S \equiv \int_{x_1}^{x_2}\frac{x\,dx}{1+x^2}.
\]
This integral is not in a form one immediately recognizes. However,
with the change of variable
@@ 113,6 +124,75 @@ with other techniques.
% 
+% diagn: new section wants review
+\section[Reversal and scaling]{Reversal and scaling of the independent variable}
+\label{inttx:reversal}
+
+\index{integration!by reversal of the independent variable}
+\index{reversal of the independent variable}
+\index{independent variable!reversal of}
+\index{variable!independent, reversal of}
+Section~\ref{inttx:220} has introduced integration by substitution.
+Many substitutions are possible but the simple change of variable
+\bq{inttx:reversal:10}
+ \begin{split}
+ u &\la -x, \\
+ du &= -dx,
+ \end{split}
+\eq
+is so easy a substitution, and is so often helpful, that it merits a
+section of its own.
+
+If
+\bq{inttx:reversal:20}
+ S \equiv \int_{x=a}^{b} f(x) \,dx,
+\eq
+where $f(x)$ is a function one wishes to integrate, then
+changing variables according to~(\ref{inttx:reversal:10}) gives that
+\[
+ S = \int_{u=-a}^{-b} f(-u) (-du)
+ = -\int_{u=-a}^{-b} f(-u) \,du,
+\]
+which is that
+\bq{inttx:reversal:30}
+ S = \int_{u=-b}^{-a} f(-u) \,du.
+\eq
+
+Even easier is the case that $a=-\infty$, $b=\infty$, in which
+\bq{inttx:reversal:40}
+ \int_{-\infty}^{\infty} f(x) \,dx =
+ \int_{-\infty}^{\infty} f(-u) \,du.
+\eq
+
+The technique of reversal of the independent variable seldom
+solves an integral on its own but can put an integral in a form
+to which other techniques can be applied, as for example in
+\S~\ref{fouri:110.30}.
+
+\index{integration!by scaling of the independent variable}
+\index{scaling of the independent variable}
+\index{independent variable!scaling of}
+\index{variable!independent, scaling of}
+Related is the change of variable
+\bq{inttx:reversal:50}
+ \begin{split}
+ ku &\la x, \\
+ k\,du &= dx, \\
+ \Im(k) &= 0,
+ \end{split}
+\eq
+by which
+\bq{inttx:reversal:60}
+ S = k\int_{u=a/k}^{b/k} f(ku) \,du.
+\eq
+In the case that $a=-\infty$, $b=\infty$,
+\bq{inttx:reversal:70}
+ \int_{-\infty}^{\infty} f(x) \,dx =
+ \left|k\right| \int_{-\infty}^{\infty} f(ku) \,du.
+\eq
+
+% 
+
\section{Integration by parts}
\label{inttx:230}
\index{integration!by parts}
@@ 204,7 +284,7 @@ we evidently have that
\end{split}
\]
Substituting these according to~(\ref{inttx:parts})
into~(\ref{inttx:230:gamma}) yields
+into~(\ref{inttx:230:gamma}) yields that
\bqb
\Gamma(z) &=& \left[ e^{-\tau}\frac{\tau^z}{z} \right]_{\tau=0}^{\infty}
 - \int_0^\infty \left(\frac{\tau^z}{z}\right) \left(-e^{-\tau} \,d\tau\right)
@@ 212,7 +292,7 @@ into~(\ref{inttx:230:gamma}) yields
\int_0^\infty \frac{\tau^z}{z} e^{-\tau} \,d\tau
\\&=& \frac{\Gamma(z+1)}{z}.
\eqb
When written
+When written as
\bq{inttx:230:20}
\Gamma(z+1) = z\Gamma(z),
\eq
@@ 313,7 +393,7 @@ guess' derivative is
dx = \alpha Ae^{\alpha t}\,dt.
\]
Substituting the last two equations into~(\ref{inttx:240:26}) and
dividing by~$dt$ yields
+dividing by~$dt$ yields that
\[
\alpha Ae^{\alpha t} = IAe^{\alpha t} + IB  P,
\]
@@ 389,7 +469,7 @@ We pass now from the elephant to the fal
sublime. Consider the definite integral%
\footnote{\cite[\S~1.2]{Lebedev}}
\[
 S = \int_0^\infty \frac{\tau^a}{\tau+1} \,d\tau, \ \ -1 < a < 0.
+ S \equiv \int_0^\infty \frac{\tau^a}{\tau+1} \,d\tau, \ \ -1 < a < 0.
\]
This is a hard integral. No obvious substitution, no evident factoring
into parts, seems to solve the integral; but there is a way.
@@ 399,7 +479,7 @@ complex variable~$z$ in place of the rea
integral formula~(\ref{taylor:cauchy}) has that integrating once
counterclockwise about a closed complex contour, with the contour
enclosing the pole at $z=-1$ but shutting out the branch point at $z=0$,
yields
+yields that
\[
I = \oint \frac{z^a}{z+1} \,dz
= i2\pi z^a|_{z=-1}
@@ 548,7 +628,7 @@ Another example%
\footnote{\cite{Kohlerlecture}}
is
\[
 T = \int_0^{2\pi} \frac{d\theta}{1+a\cos\theta},
+ T \equiv \int_0^{2\pi} \frac{d\theta}{1+a\cos\theta},
+ \ \ \Im(a)=0,\ \left|\Re(a)\right| < 1.
\]
As in the previous example, here again the contour is not closed.
@@ 587,7 +667,7 @@ meaning that one of the two poles lies w
without, as is seen by the successive steps%
\footnote{
These steps are perhaps best read from bottom to top. See
 Ch.~\ref{noth}'s footnote~\ref{noth:420:85}.
+ chapter~\ref{noth}'s footnote~\ref{noth:420:85}.
}
\[
\renewcommand{\arraystretch}{1.5}
@@ 608,7 +688,7 @@ without, as is seen by the successive st
\er
\]
Per Cauchy's integral formula~(\ref{taylor:cauchy}), integrating
about the pole within the contour yields
+about the pole within the contour yields that
\[
T = \left.
i2\pi \frac{i2/a}{
@@ 657,7 +737,7 @@ evaluating integrals by this section's t
This section treats integration by partialfraction expansion. It
introduces the expansion itself first.%
\footnote{%
 \cite[Appendix~F]{Phillips/Parr}%
+ \cite[appendix~F]{Phillips/Parr}%
\cite[\S\S~2.7 and~10.12]{Hildebrand}
}
Throughout the section,
@@ 685,7 +765,7 @@ Combining the two fractions over a commo
is the \emph{numerator} and~$A$ is the \emph{denominator.} The
\emph{quotient} is $Q=B/A$.
}
yields
+yields that
\[
f(z) = \frac{z+3}{(z1)(z2)}.
\]
@@ 787,7 +867,7 @@ subsection treats the matter in a differ
\index{Parseval, MarcAntoine (17551836)}
Consider the function
\bq{inttx:260:40}
 g(z) = \sum_{k=0}^{N-1} \frac{Ce^{i2\pi k/N}}{z-\ep e^{i2\pi k/N}},
+ g(z) \equiv \sum_{k=0}^{N-1} \frac{Ce^{i2\pi k/N}}{z-\ep e^{i2\pi k/N}},
\ \ N>1,\ 0<\ep\ll 1,
\eq
where~$C$ is a realvalued constant. This function evidently has a
@@ 867,6 +947,9 @@ $N$fold pole with a small circle of ord
incidentally that $1/N\ep^{N-1}$ is a large number not a small. The
poles are close together but very strong.
+\subsection{An example}
+\label{inttx:260.21}
+
An example to illustrate the technique, separating a double pole:
\bqb
f(z) &=& \frac{z^2z+6}{(z1)^2(z+2)} \\
@@ 886,28 +969,76 @@ An example to illustrate the technique,
\brr \\
&=& \lim_{\ep\ra 0}\blr
\left(\frac{1}{z[1+\ep]}\right)
 \left[\frac{6+\ep}{6\ep+2\ep^2}\right]
+ \left[\frac{6+\ep+\ep^2}{6\ep+2\ep^2}\right]
\right. \\&& \left.\makebox[2.0\parindent]{} +
\left(\frac{1}{z[1\ep]}\right)
 \left[\frac{6-\ep}{-6\ep+2\ep^2}\right]
+ \left[\frac{6-\ep+\ep^2}{-6\ep+2\ep^2}\right]
\right. \\&& \left.\makebox[2.0\parindent]{} +
\left(\frac{1}{z+2}\right)
 \left[\frac{\mbox{0xC}}{9}\right]
 \brr \\
 &=& \lim_{\ep\ra 0}\left\{
+ \left[\frac{\mbox{0xC}}{9-\ep^2}\right]
+ \brr
+\eqb
+As usual when handling infinitesimals like~$\ep$, we can drop from
+each sum its insignificant terms in the limit, but which terms are
+insignificant depends on the ratio in which each sum is:
+\bqb
+ \lim_{\ep\ra 0} \frac{6\pm\ep+\ep^2}{\pm 6\ep+2\ep^2}
+ &=& \lim_{\ep\ra 0}
+ \blr \frac{6}{\pm 6\ep+2\ep^2} + \frac{\pm\ep}{\pm 6\ep} \brr
+ = \lim_{\ep\ra 0}
+ \blr \frac{\pm 1/\ep}{1\pm\ep/3} + \frac{1/6}{1} \brr \\
+ &=& \lim_{\ep\ra 0}
+ \blr\bigg(\pm\frac{1}{\ep}\bigg)\bigg(1\mp\frac \ep 3+\cdots\bigg)+\frac{1}{6}\brr\\
+ &=& \pm\frac{1}{\ep}-\frac{1}{3}+\frac{1}{6}
+ = \pm\frac{1}{\ep}-\frac{1}{6}.
+\eqb
+Thus,
+\[
+ f(z)
+ = \lim_{\ep\ra 0}\left\{
 \frac{1/\ep - 1/6}{z-[1+\ep]}
 + \frac{-1/\ep-1/6}{z-[1-\ep]}
 + \frac{4/3}{z+2}
 \right\} \\
 &=& \lim_{\ep\ra 0}\left\{
+ \right\}
+\]
+gives the partialfraction expansion of $f(z)$.
+
+\index{pole!shadow}
+\index{pole!proper or distinct}
+\index{shadow pole}
+\index{proper pole}
+\index{distinct pole}
+\index{lurking}
+\index{hiding}
+\index{alternate form}
+\index{form, alternate}
+Incidentally, one can recombine terms to reach the alternate form
+\[
+ f(z)
+ = \lim_{\ep\ra 0}\left\{
 \frac{1/\ep}{z-[1+\ep]}
 + \frac{-1/\ep}{z-[1-\ep]}
 + \frac{-1/3}{z-1}
 + \frac{4/3}{z+2}
\right\}.
\eqb
Notice how the calculation has discovered an additional, single pole at
$z=1$, the pole hiding under dominant, double pole there.
+\]
+This alternate form is valid and, for many purposes (as in
+\S~\ref{inttx:260.30}), is even handy; but one should interpret it
+cautiously: counting terms in the alternate form, you would think that
+$f(z)$ had four poles whereas it has in fact only three. The pole of
+the ratio $(-1/3)/(z-1)$---infinitely weaker than, yet lying infinitely
+near to, the poles of the two ratios $(\pm 1/\ep)/(z-[1\pm\ep])$---is indeed
+a pole of the ratio $(-1/3)/(z-1)$, but it is not a proper, distinct
+pole of $f(z)$. It can be understood as a sort of shadow pole, if you
+like, that lurks near or hides under the twin dominant poles that loom
+over it. To see the proper, distinct poles of $f(z)$, refer rather to
+the earlier form.
+
+However one might choose to account for and to describe the shadow pole,
+one cannot merely
+omit it. If one did omit it, then recombining the remaining partial
+fractions over a common denominator (try it!) would fail to recover our
+original expression for $f(z)$.
\subsection{Integrating a rational function}
\label{inttx:260.30}
@@ 921,7 +1052,7 @@ and~(\ref{inttx:260:25})and, if neede
expand the function into a sum of partial fractions, each of which one
can integrate individually.
Continuing the example of \S~\ref{inttx:260.20}, for $0\le x<1$,
+Continuing the example of \S~\ref{inttx:260.21}, for $0\le x<1$,
\bqb
\int_0^x f(\tau) \,d\tau &=& \int_0^x
\frac{\tau^2\tau+6}{(\tau1)^2(\tau+2)} \,d\tau \\
@@ 968,7 +1099,7 @@ Continuing the example of \S~\ref{inttx:
 \frac 1 3 \ln(1x)
+ \frac 4 3 \ln\left(\frac{x+2}{2}\right).
\eqb
To check (\S~\ref{integ:245}) that the result is correct, we can take
+To check per \S~\ref{integ:245} that the result is correct, we can take
the derivative of the final expression:
\bqb
\lefteqn{
@@ 995,9 +1126,8 @@ which indeed has the form of the integra
the result. (Notice incidentally how much easier it is symbolically to
differentiate than to integrate!)
% diagn: this paragraph is new and wants review
Section~\ref{fouri:250} exercises the technique in a more sophisticated
way, applying it in the context of Ch.~\ref{fouri}'s Laplace transform
+way, applying it in the context of chapter~\ref{fouri}'s Laplace transform
to solve a linear differential equation.
\subsection{The derivatives of a rational function}
@@ 1024,7 +1154,8 @@ enjoys derivatives in the general ration
where~$g$ and~$h_k$ are polynomials in nonnegative powers of~$w$. The
property is proved by induction. When $k=0$, (\ref{inttx:260:83})
is~(\ref{inttx:260:80}), so~(\ref{inttx:260:83}) is good at least for
this case. Then, if~(\ref{inttx:260:83}) holds for $k=n1$,
+this case. Then, using the product rule~(\ref{drvtv:proddiv2}),
+if~(\ref{inttx:260:83}) holds for $k=n1$.
\bqb
\frac{d^n\Phi}{dw^n}
&=& \frac{d}{dw} \left[
@@ 1079,7 +1210,7 @@ A rational function with repeated poles,
}, \label{inttx:260:50}\\
N &\equiv& \sum_{j=1}^M p_j, \xn\\
p_j &\ge& 0, \xn\\
 \alpha_{j'} &\neq& \alpha_j \ \ \mbox{if $j' \neq j$}, \xn
+ \alpha_{j'} &\neq& \alpha_j \ \ \mbox{for all $j' \neq j$}, \xn
\eqa
where~$j$, $k$, $M$, $N$ and the several~$p_j$ are integers,
cannot be expanded solely in the firstorder fractions of
@@ 1158,7 +1289,7 @@ produces the coefficients
(z\alpha_j)^{p_j}f(z)
\Big]
 \right|_{z=\alpha_j}
 , \ \ \ \ 0 \le \ell < p,
+ , \ \ \ \ 0 \le \ell < p_j,
\eq
to weight the expansion~(\ref{inttx:260:70})'s partial fractions.
In case of a repeated pole, these coefficients evidently depend not only
@@ 1180,7 +1311,8 @@ solution actually exists.
Comes from us the reply, ``Why should we prove that a solution exists,
once we have actually found it?''
Ah, but the professional's point is that we have found the solution only
+Ah, but the hypothetical professional's
+point is that we have found the solution only
if in fact it does exist, and uniquely; otherwise what we have
\emph{found} is a phantom. A careful review of \S~\ref{inttx:260.50}'s
logic discovers no guarantee that all of~(\ref{inttx:260:75})'s
@@ 1220,7 +1352,13 @@ and computing the difference
between them. Logically this difference must be zero for all~$z$ if the
two solutions are actually to represent the same function $f(z)$. This
however is seen to be possible only if $B_{j\ell} = A_{j\ell}$ for each
$(j,\ell)$. Therefore, the two solutions are one and the same.
+$(j,\ell)$. Therefore, the two solutions are one and the same. (The
+professional might request a further demonstration of orthogonality,
+\S~\ref{mtxinv:445};
+% diagn: supply the missing reference later, if such a section is added
+%
+% and [not yet written];
+but we will leave the point in that form.)
\emph{Existence} comes of combining the several fractions
of~(\ref{inttx:260:70}) over a common denominator and comparing the
@@ 1236,8 +1374,8 @@ resultswhich might for example (if, s
b_1 &=& A_{00} + A_{01} + A_{10}, \\
b_2 &=& 2A_{01}  5A_{10}.
\eqb
We will show in Chs.~\ref{matrix} through~\ref{eigen} that
when such a system has no solution, there always exist an alternate set
+We will show in chapters~\ref{matrix} through~\ref{eigen} that
+when such a system has no solution, there always exists an alternate set
of~$b_k$ for which the same system has multiple solutions. But
uniqueness, which we have already established, forbids such multiple
solutions in all cases. Therefore it is not possible for the system to
@@ 1249,6 +1387,250 @@ outlined here.
% 
+\section[Manipulation of a Pythagorean expression]{Integration by the
+manipulation of a Pythagorean expression}
+\label{inttx:440}
+\index{expression!Pythagorean}
+\index{Pythagorean expression}
+\index{manipulation of a Pythagorean expression}
+\index{integration by the manipulation of a Pythagorean expression}
+
+In context of the chapter you are reading, a \emph{Pythagorean
+expression} is an expression of the form of $\pm 1\pm\tau^2$. This
+section suggests ways to approach integrands that contain Pythagorean
+expressions.
+
+\subsection{Pythagorean radicals}
+\label{inttx:440.20}
+\index{radical!Pythagorean}
+\index{Pythagorean radical}
+\index{manipulation of a Pythagorean radical}
+\index{integration by the manipulation of a Pythagorean radical}
+
+\index{path length}
+\index{length of a path}
+In applications, as for instance in the path-length computation of
+\S~\ref{integ:241.08}, one often meets integrands that contain
+Pythagorean expressions under a radical sign, like $\sqrt{1-\tau^2}$,
+$\sqrt{\tau^2-1}$ or $\sqrt{\tau^2+1}$. An example would be
+\[
+ S_1(x) \equiv \int_0^x\frac{d\tau}{\sqrt{1-\tau^2}},
+\]
+which contains the \emph{Pythagorean radical} $\sqrt{1-\tau^2}$. Such a
+Pythagorean radical recalls the inverse trigonometrics of
+Table~\ref{cexp:drvi}, whereby
+\[
+ S_1(x) = \arcsin \tau|_0^x = \arcsin x.
+\]
+Unfortunately, not every integrand that features such a radical appears
+in the table; so, for example,
+\[
+ S_2(x) \equiv \int_0^xd\tau\,\tau\sqrt{1-\tau^2}
+\]
+wants a substitution like $u_2^2 \la 1-\tau^2$, $-u_2\,du_2 = \tau\,d\tau$,
+by which the technique of \S~\ref{inttx:220} finds that
+\[
+ S_2(x) = -\int_1^{\sqrt{1-x^2}}du_2\,u_2^2
+ = -\left.\frac{u_2^3}{3}\right|_1^{\sqrt{1-x^2}}
+ = \frac{1-(1-x^2)^{3/2}}{3}.
+\]
+
+That's all relatively straightforward, but now try an integral like
+\[
+ S_3(x) \equiv \int_0^xd\tau\sqrt{1-\tau^2}.
+\]
+This doesn't \emph{look} harder. Indeed, if anything, it looks slightly
+easier than $S_1(x)$ or $S_2(x)$. Notwithstanding, the techniques used
+to solve those somehow don't quite seem to work on $S_3(x)$ [try it!].
+
+As it happens, we have already met, and solved, a similar integral in
+\S~\ref{integ:241.08}. That subsection has illustrated the technique.
+Applying the same technique here, we assemble by trial a small table of
+potentially relevant antiderivatives,
+\[
+ \begin{split}
+ \frac{d}{d\tau} \arcsin\tau &= \frac{1}{\sqrt{1-\tau^2}}, \\
+ \frac{d}{d\tau}\sqrt{1-\tau^2} &= \frac{-\tau}{\sqrt{1-\tau^2}}, \\
+ \frac{d}{d\tau}\tau\sqrt{1-\tau^2}
+ &= \sqrt{1-\tau^2}-\frac{\tau^2}{\sqrt{1-\tau^2}}
+ = 2\sqrt{1-\tau^2}-\frac{1}{\sqrt{1-\tau^2}}.
+ \end{split}
+\]
+wherein the pivotal step on the last line is to have \emph{manipulated
+the Pythagorean radical,} observing that
+\[
+ \frac{\tau^2}{\sqrt{1-\tau^2}}
+ = -\frac{1-\tau^2}{\sqrt{1-\tau^2}}+\frac{1}{\sqrt{1-\tau^2}}
+ = -\sqrt{1-\tau^2}+\frac{1}{\sqrt{1-\tau^2}}.
+\]
+Using some of the abovecomputed derivatives, the desired
+integrand
+\linebreak % bad break
+$\sqrt{1\tau^2}$ can now be built up by stages:
+\[
+ \begin{split}
+ \frac{d}{2\,d\tau}\tau\sqrt{1-\tau^2}
+ &= \sqrt{1-\tau^2}-\frac{1}{2\sqrt{1-\tau^2}}; \\
+ \frac{d}{2\,d\tau} \arcsin\tau
+ + \frac{d}{2\,d\tau}\tau\sqrt{1-\tau^2}
+ &= \sqrt{1-\tau^2}.
+ \end{split}
+\]
+Hence,
+\[
+ S_3(x)
+ = \left[\frac{1}{2}\arcsin\tau+\frac{1}{2}\tau\sqrt{1-\tau^2} \right]_0^x
+ = \frac{1}{2}\arcsin x+\frac{1}{2}x\sqrt{1-x^2},
+\]
+a result which can (and should) be checked by differentiating in the
+manner of \S~\ref{integ:245}.
+
+Here is another example of integration by the manipulation of a
+Pythagorean radical:
+\[
+ \begin{split}
+ S_4(x) &\equiv \int_1^xd\tau\,\tau^2\sqrt{\tau^2-1}; \\
+ \frac{d}{d\tau} \mopx{arccosh}\tau &= \frac{1}{\sqrt{\tau^2-1}}; \\
+ %\frac{d}{d\tau} \sqrt{\tau^2-1} &= \frac{\tau}{\sqrt{\tau^2-1}}; \\
+ \frac{d}{d\tau} \tau\sqrt{\tau^2-1}
+ &= \sqrt{\tau^2-1}+\frac{\tau^2}{\sqrt{\tau^2-1}} \\
+ &= 2\sqrt{\tau^2-1}+\frac{1}{\sqrt{\tau^2-1}}; \\
+ \frac{d}{d\tau} \tau^3\sqrt{\tau^2-1}
+ &= 3\tau^2\sqrt{\tau^2-1}+\frac{\tau^4}{\sqrt{\tau^2-1}} \\
+ &= 4\tau^2\sqrt{\tau^2-1}+\frac{\tau^2}{\sqrt{\tau^2-1}} \\
+ &= (4\tau^2+1)\sqrt{\tau^2-1}+\frac{1}{\sqrt{\tau^2-1}}; \\
+ \frac{d}{4\,d\tau} \tau^3\sqrt{\tau^2-1}
+ &= \left(\tau^2+\frac 1 4\right)\sqrt{\tau^2-1}+\frac{1}{4\sqrt{\tau^2-1}}.
+ \end{split}
+\]
+Having assembled the above small table of potentially relevant antiderivatives,
+we proceed:
+\[
+ \setlength\tla{6em}
+ \begin{split}
+ \lefteqn{\hspace\tla-\frac{d}{8\,d\tau} \tau\sqrt{\tau^2-1}
+ + \frac{d}{4\,d\tau} \tau^3\sqrt{\tau^2-1}}\\
+ &= \tau^2\sqrt{\tau^2-1}+\frac{1}{8\sqrt{\tau^2-1}}; \\
+ \lefteqn{\hspace\tla-\frac{d}{8\,d\tau} \mopx{arccosh}\tau
+ - \frac{d}{8\,d\tau} \tau\sqrt{\tau^2-1}
+ + \frac{d}{4\,d\tau} \tau^3\sqrt{\tau^2-1}}\\
+ &= \tau^2\sqrt{\tau^2-1}; \\
+ S_4(x) &= \bigg[
+ \mbox{$\ds-\frac{\mopx{arccosh}\tau}{8}$}\\
+ &\qquad+\left(\frac{\tau^2}{4} - \frac 1 8\right)\tau\sqrt{\tau^2-1}\bigg]_1^x; \\
+ &= -\frac{\mopx{arccosh}x}{8} +\left(\frac{x^2}{4} - \frac 1 8\right)x\sqrt{x^2-1}.
+ \end{split}
+\]
+For yet more examples, consider
+\[
+ S_5(x) \equiv \int_1^x\frac{\tau^2\,d\tau}{\sqrt{\tau^2-1}}
+ = \int_1^xd\tau\sqrt{\tau^2-1}
+ + \int_1^x\frac{d\tau}{\sqrt{\tau^2-1}},
+\]
+to complete whose evaluation is left as an exercise, and
+\[
+ S_6(x) \equiv \int_0^xd\tau\sqrt{a^2-\tau^2}
+ = {a^2} \int_{(\tau/a)=0}^{x/a}d\left(\frac{\tau}{a}\right)\sqrt{1-\left(\frac{\tau}{a}\right)^2}
+ = a^2S_3\left(\frac x a\right).
+\]
+
+\subsection{Pythagorean nonradicals}
+\label{inttx:440.40}
+\index{nonradical, Pythagorean}
+\index{Pythagorean nonradical}
+\index{manipulation of a Pythagorean nonradical}
+\index{integration by the manipulation of a Pythagorean nonradical}
+
+Besides Pythagorean radicals, Pythagorean nonradicals occur, too.
+However, these tend to be easier to solve. For example,
+\[
+ \begin{split}
+ S_7(x)
+ &\equiv \int_0^x \frac{\tau^2\,d\tau}{1+\tau^2}
+ = \int_0^x \left(1-\frac 1{1+\tau^2}\right)d\tau
+ = x - \arctan x;
+ \\
+ S_8(x)
+ &\equiv \int_0^x \frac{\tau^3\,d\tau}{1+\tau^2}
+ = \int_0^x \left(\tau-\frac\tau{1+\tau^2}\right)d\tau
+ = \frac{x^2}2 - \int_0^x \frac{\tau\,d\tau}{1+\tau^2},
+ \\
+ u_8^2 &\la 1+\tau^2,\ u_8\,du_8 = \tau\,d\tau,
+ \\
+ S_8(x)
+ &= \frac{x^2}2 - \int_1^{\sqrt{1+x^2}} \frac{du_8}{u_8}
+ = \frac{x^2 - \ln(1+x^2)}2.
+ \end{split}
+\]
+
+%\subsection{PseudoPythagorean expressions}
+%\label{inttx:440.70}
+%
+%\index{expression!pseudoPythagorean}
+%\index{pseudoPythagorean expression}
+%\index{manipulation of a pseudoPythagorean expression}
+%
+%Sometimes one meets an integral like
+%\[
+% S_9(x) \equiv \int_0^x d\tau\sqrt{\frac\tau{1+\tau}}.
+%\]
+%These probably arise less often in applications than the Pythagoreans do
+%(though this might depend on your field of work). When they do arise,
+%you can treat them in any convenient way. However, one way that is
+%sometimes convenient is to treat them as pseudoPythagoreans,
+%substituting $u_9^2 \la \tau$, $2u_9\,du = d\tau$, to obtain a form like
+%\[
+% S_9(x) = \int_0^{\sqrt{x}} \frac{2u_9^2\,du_9}{\sqrt{1+u_9^2}},
+%\]
+%which, from the earlier parts of this section, we already know how to
+%solve.
+
+% 
+
+\section{Trial derivatives}
+\label{inttx:444}
+\index{trial derivative}
+\index{derivative!trial}
+
+Besides the technique of the Pythagorean radical, \S~\ref{inttx:440.20}
+has also incidentally demonstrated another, different technique, vaguer
+but more broadly applicable. It has incidentally demonstrated the
+technique of \emph{trial
+derivatives.}\footnote{See~\cite[\S~1.5]{Hildebrand}, which introduces
+the technique in another guise under a different name.}
+
+Review the $S_3(x)$ and $S_4(x)$ of \S~\ref{inttx:440.20}. To solve
+each has required us to develop a small table of potentially relevant
+antiderivatives. How did we develop each small table? Well, we began
+by copying a relevant inversetrigonometric entry from
+Table~\ref{cexp:drvi}; but then, to extend the table, we \emph{tried}
+taking the derivatives of various functions that resembled the
+integrand or part of the integrand. Not all our trials gave useful
+antiderivatives but some did.
+
+\index{creativity}
+\index{experience}
+To decide which derivatives to try during a given integration depends on
+the mathematician's creativity and experience. However, a typical theme
+is to multiply the integrand (or part of the integrand) by~$\tau$,
+$\tau^2$ or maybe~$\tau^3$, taking the derivative of the product as
+\S~\ref{inttx:440.20} has done. It is not usually necessary, nor
+helpful, to build up a huge table butwell, when you read
+\S~\ref{inttx:440.20}, you saw how it went.
+
+\index{feedback}
+\index{artillery}
+\index{19th century}
+The reason to take trial \emph{derivatives,} incidentally, is that one
+does not generally know very well how to take trial
+\emph{antiderivatives!} Analytically, derivatives are the easier to
+take. To seek an antiderivative by taking derivatives might (or might
+not) seem counterintuitive, but it's a game of feedback and educated
+guesses, like nineteenthcentury artillery finding the range to its
+target. It is a game that can prosper, as we have seen.
+
+% 
+
\section{Frullani's integral}
\label{inttx:460}
\index{Frullani's integral}
@@ 1261,8 +1643,11 @@ One occasionally meets an integral of th
where~$a$ and~$b$ are real, positive coefficients and $f(\tau)$ is an
arbitrary complex expression in~$\tau$. One wants to split such an
integral in two as $\int [f(b\tau)/\tau]\,d\tau  \int
[f(a\tau)/\tau]\,d\tau$; but if $f(0^+) \neq f(+\infty)$, one cannot,
because each halfintegral alone diverges. Nonetheless, splitting the
+[f(a\tau)/\tau]\,d\tau$, except that
+% On later review, I don't remember why the following phrase was
+% originally included. Maybe there was a good reason for it?
+%if $f(0^+) \neq f(+\infty)$, then
+each half-integral alone may diverge. Nonetheless, splitting the
integral in two is the right idea, provided that one first relaxes the
limits of integration as
\[
@@ 1272,7 +1657,7 @@ limits of integration as
\right\}.
\]
Changing $\sigma\la b\tau$ in the left integral and $\sigma\la a\tau$ in
the right yields
+the right yields that
\bqb
S &=& \lim_{\ep\ra 0^+} \left\{
\int_{b\ep}^{b/\ep}\frac{f(\sigma)}{\sigma} \,d\sigma
@@ 1336,8 +1721,7 @@ $\tau^{a1}\ln\tau$ tend to arise%
\ln\tau$; wherein $\ln \beta$ is just a constant.
}
among other places in integrands related to special functions
% diagn
([chapter not yet written]).
+(as in the book's part~\ref{part30}).
The two occur often enough to merit investigation here.
Concerning $\exp(\alpha\tau)\tau^n$, by \S~\ref{inttx:240}'s method of
@@ 1347,38 +1731,37 @@ unknown coefficients we guess its antide
\frac{d}{d\tau} \sum_{k=0}^n a_k \exp(\alpha\tau)\tau^k
\\&=&
\sum_{k=0}^n \alpha a_k \exp(\alpha\tau)\tau^k
 + \sum_{k=1}^n \frac{a_k}{k} \exp(\alpha\tau)\tau^{k1}
+ + \sum_{k=1}^n ka_k \exp(\alpha\tau)\tau^{k1}
\\&=&
\alpha a_n \exp(\alpha\tau)\tau^n
 + \sum_{k=0}^{n1} \left(
+ + \exp(\alpha\tau)\sum_{k=0}^{n1} \left[
\alpha a_k
+
 \frac{a_{k+1}}{k+1}
 \right)
 \exp(\alpha\tau)\tau^k.
+ (k+1)a_{k+1}
+ \right] \tau^k.
\eqb
If so, then evidently
\bqb
a_n &=& \frac{1}{\alpha}; \\
 a_k &=&  \frac{a_{k+1}}{(k+1)(\alpha)},
+ a_k &=&  \frac{k+1}{\alpha}a_{k+1},
\ \ 0 \le k < n.
\eqb
That is,
\[
 a_k = \frac{1}{
 \alpha \prod_{j=k+1}^{n}(j\alpha)
 } = \frac{()^{nk}}{
 (n!/k!) \alpha^{nk+1}
+ a_k = \frac{1}{\alpha}
+ \prod_{j=k+1}^{n}(j\alpha)
+ = \frac{n!/k!}{
+ (\alpha)^{nk+1}
}, \ \ 0 \le k \le n.
\]
Therefore,%
\footnote{\cite[Appendix~2, eqn.~73]{Shenk}}
+\footnote{\cite[eqn.~17.25.4]{Spiegel/Liu}\cite[appendix~2, eqn.~73]{Shenk}}
\bq{inttx:470:20}
\exp(\alpha\tau)\tau^n =
 \frac{d}{d\tau} \sum_{k=0}^n
 \frac{()^{nk}}{
 (n!/k!) \alpha^{nk+1}
 } \exp(\alpha\tau)\tau^k,
+ \frac{d}{d\tau} \left[\exp(\alpha\tau)\sum_{k=0}^n
+ \frac{n!/k!}{
+ (\alpha)^{nk+1}
+ } \tau^k\right],
\ \ n \in \mathbb Z, \ n \ge 0, \ \alpha \neq 0.
\eq
@@ 1394,7 +1777,7 @@ tries we eventually do strike the right
\tau^{a1}[aB\ln\tau + (B+aC)],
\eqb
which demands that $B=1/a$ and that $C=1/a^2$. Therefore,%
\footnote{\cite[Appendix~2, eqn.~74]{Shenk}}
+\footnote{\cite[eqn.~17.26.3]{Spiegel/Liu}\cite[appendix~2, eqn.~74]{Shenk}}
\bq{inttx:470:30}
\tau^{a1}\ln \tau
=
@@ 1406,20 +1789,23 @@ $\exp(\alpha\tau)\tau^n\ln\tau$ and so o
as the need arises.
Equation~(\ref{inttx:470:30}) fails when $a=0$, but in this case with a
little imagination the antiderivative is not hard to guess:
+little imagination the antiderivative is not hard to guess:%
+\footnote{\cite[eqn.~17.26.4]{Spiegel/Liu}}
\bq{inttx:470:35}
\frac{\ln \tau}{\tau}
=
\frac{d}{d\tau} \frac{(\ln\tau)^2}{2}.
\eq
If~(\ref{inttx:470:35}) seemed hard to guess nevertheless, then
l'H\^opital's rule~(\ref{drvtv:260:lhopital}), applied
to~(\ref{inttx:470:30}) as $a \ra 0$, with the observation
from~(\ref{alggeo:230:316}) that
\bq{inttx:470:40}
 \tau^a = \exp(a\ln\tau),
\eq
would yield the same~(\ref{inttx:470:35}).
+% The rest of this paragraph is not true as far as the writer can any
+% longer tell (but maybe he is now looking at it in the wrong way?)
+%If~(\ref{inttx:470:35}) seemed hard to guess nevertheless, then
+%l'H\^opital's rule~(\ref{drvtv:260:lhopital}), applied
+%to~(\ref{inttx:470:30}) as $a \ra 0$, with the observation
+%from~(\ref{alggeo:230:316}) that
+%\bq{inttx:470:40}
+% \tau^a = \exp(a\ln\tau),
+%\eq
+%would yield the same~(\ref{inttx:470:35}).
Table~\ref{inttx:470:tbl} summarizes.
\begin{table}
@@ 1429,12 +1815,25 @@ Table~\ref{inttx:470:tbl} summarizes.
\label{inttx:470:tbl}
\index{antiderivative!of a product of exponentials, powers and logarithms}
\bqb
+ \exp(\alpha\tau)
+ &=& \frac{d}{d\tau} \left[\exp(\alpha\tau)
+ \left(\frac 1 \alpha\right)
+ \right] \\
+ \exp(\alpha\tau)\tau
+ &=& \frac{d}{d\tau} \left[\exp(\alpha\tau)
+ \left(\frac\tau\alpha  \frac 1{\alpha^2}\right)
+ \right] \\
+ \exp(\alpha\tau)\tau^2
+ &=& \frac{d}{d\tau} \left[\exp(\alpha\tau)
+ \left(\frac{\tau^2}\alpha  \frac {2\tau}{\alpha^2}
+ + \frac 2{\alpha^3}\right)
+ \right] \\
\exp(\alpha\tau)\tau^n
 &=& \frac{d}{d\tau} \sum_{k=0}^n
 \frac{()^{nk}}{
 (n!/k!) \alpha^{nk+1}
 } \exp(\alpha\tau)\tau^k,
 \ \ n \in \mathbb Z, \ n \ge 0, \ \alpha \neq 0 \\
+ &=& \frac{d}{d\tau} \left[\exp(\alpha\tau)\sum_{k=0}^n
+ \frac{n!/k!}{
+ (\alpha)^{nk+1}
+ } \tau^k\right] \\
+ && \qquad\quad n \in \mathbb Z, \ n \ge 0, \ \alpha \neq 0 \\
\tau^{a1}\ln \tau
&=& \frac{d}{d\tau}\frac{\tau^a}{a}\left(\ln\tau  \frac{1}{a}\right),
\ \ a \neq 0 \\
@@ 1454,7 +1853,7 @@ Table~\ref{inttx:470:tbl} summarizes.
\index{closed form}
With sufficient cleverness the techniques of the foregoing sections
solve many, many integrals. But not all. When all else fails, as
sometimes it does, the Taylor series of Ch.~8 and the antiderivative of
+sometimes it does, the Taylor series of chapter~8 and the antiderivative of
\S~\ref{inttx:210} together offer a concise, practical way to integrate
some functions, at the price of losing the functions' known closed
analytic forms. For example,
@@ 1473,7 +1872,10 @@ from Table~\ref{taylor:315:tbl} is used
$\sin z$ is just a series, too. The series above converges just as
accurately and just as fast.
Sometimes it helps to give the series a name like
+Sometimes it helps to give the series a name like\footnote{%
+ The $\mopx{myf}$ = ``my function,'' but you can use any name for a
+ function like this.%
+}
\[
\mopx{myf} z \equiv
\sum_{k=0}^{\infty} \frac{()^kz^{2k+1}}{(2k+1)2^k k!}
diff pruN 0.53.201204142/tex/keyval.tex 0.56.20180123.12/tex/keyval.tex
 0.53.201204142/tex/keyval.tex 20120415 16:42:12.000000000 +0000
+++ 0.56.20180123.12/tex/keyval.tex 19700101 00:00:00.000000000 +0000
@@ 1,86 +0,0 @@
%%
%% This is file `keyval.tex',
%% generated with the docstrip utility.
%%
%% The original source files were:
%%
%% xkeyval.dtx (with options: `xkvkeyval')
%%
%% 
%% Copyright (C) 20042008 Hendri Adriaens
%% 
%%
%% This work may be distributed and/or modified under the
%% conditions of the LaTeX Project Public License, either version 1.3
%% of this license or (at your option) any later version.
%% The latest version of this license is in
%% http://www.latexproject.org/lppl.txt
%% and version 1.3 or later is part of all distributions of LaTeX
%% version 2003/12/01 or later.
%%
%% This work has the LPPL maintenance status "maintained".
%%
%% This Current Maintainer of this work is Hendri Adriaens.
%%
%% This work consists of the file xkeyval.dtx and derived files
%% keyval.tex, xkvtxhdr.tex, xkeyval.sty, xkeyval.tex, xkvview.sty,
%% xkvltxp.sty, pstxkey.tex, pstxkey.sty, xkveca.cls, xkvecb.cls,
%% xkvesa.sty, xkvesb.sty, xkvesc.sty, xkvex1.tex, xkvex2.tex,
%% xkvex3.tex and xkvex4.tex.
%%
%% The following files constitute the xkeyval bundle and must be
%% distributed as a whole: readme, xkeyval.pdf, keyval.tex,
%% pstxkey.sty, pstxkey.tex, xkeyval.sty, xkeyval.tex, xkvview.sty,
%% xkvltxp.sty, xkvtxhdr.tex, pstxkey.dtx and xkeyval.dtx.
%%
%%
%% Based on keyval.sty.
%%
\def\XKV@tempa#1{%
\def\KV@@sp@def##1##2{%
 \futurelet\XKV@resa\KV@@sp@d##2\@nil\@nil#1\@nil\relax##1}%
\def\KV@@sp@d{%
 \ifx\XKV@resa\@sptoken
 \expandafter\KV@@sp@b
 \else
 \expandafter\KV@@sp@b\expandafter#1%
 \fi}%
\def\KV@@sp@b#1##1 \@nil{\KV@@sp@c##1}%
 }
\XKV@tempa{ }
\def\KV@@sp@c#1\@nil#2\relax#3{\XKV@toks{#1}\edef#3{\the\XKV@toks}}
\def\KV@do#1,{%
 \ifx\relax#1\@empty\else
 \KV@split#1==\relax
 \expandafter\KV@do\fi}
\def\KV@split#1=#2=#3\relax{%
 \KV@@sp@def\XKV@tempa{#1}%
 \ifx\XKV@tempa\@empty\else
 \expandafter\let\expandafter\XKV@tempc
 \csname\KV@prefix\XKV@tempa\endcsname
 \ifx\XKV@tempc\relax
 \XKV@err{`\XKV@tempa' undefined}%
 \else
 \ifx\@empty#3\@empty
 \KV@default
 \else
 \KV@@sp@def\XKV@tempb{#2}%
 \expandafter\XKV@tempc\expandafter{\XKV@tempb}\relax
 \fi
 \fi
 \fi}
\def\KV@default{%
 \expandafter\let\expandafter\XKV@tempb
 \csname\KV@prefix\XKV@tempa @default\endcsname
 \ifx\XKV@tempb\relax
 \XKV@err{No value specified for key `\XKV@tempa'}%
 \else
 \XKV@tempb\relax
 \fi}
\def\KV@def#1#2[#3]{%
 \@namedef{KV@#1@#2@default\expandafter}\expandafter
 {\csname KV@#1@#2\endcsname{#3}}%
 \@namedef{KV@#1@#2}##1}
\endinput
%%
%% End of file `keyval.tex'.
diff pruN 0.53.201204142/tex/main.tex 0.56.20180123.12/tex/main.tex
 0.53.201204142/tex/main.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/main.tex 20180123 03:21:44.000000000 +0000
@@ 5,8 +5,8 @@
\usepackage{thb}
\newboolean{isdraft}
\setboolean{isdraft}{true}
\newcommand{\veryear}{2012}
\newcommand{\verdate}{16 April 2012}
+\newcommand{\veryear}{2018}
+\newcommand{\verdate}{23 January 2018}
% The \veryear on the next line can be changed to a concrete year
% like 1970 for the second and subsequent printings, to keep from
% updating the copyright date and the date of publication.
@@ 43,14 +43,16 @@
Thaddeus~H. Black, 1967.\\
Derivations of Applied Mathematics.\\
% Second edition.\\ % (for example)
U.S. Library of Congress class QA401.
+U.S.\ Library of Congress class QA401.
\noindent\\
Copyright \copyright\ \ifthenelse{\boolean{isdraft}}{1983}{}\firstprintingyear\ by Thaddeus~H. Black
$\langle\texttt{thb@derivations.org}\rangle$.
+Copyright \copyright\ \ifthenelse{\boolean{isdraft}}{1983}{}\firstprintingyear\ Thaddeus~H. Black.
+%$\langle\texttt{thb@derivations.org}\rangle$.
\noindent\\
Published by the Debian Project~\cite{Debian}.
+This book is published by the Debian Project~\cite{Debian}. Besides in
+Debian GNU/Linux and its derivatives~\cite{self}, you can fetch the book's latest
+open-source printing at \emph{derivations.org}~\cite{selfderivations.org}.
\noindent\\
This book is free software. You can redistribute and/or modify it under
@@ 58,7 +60,8 @@ the terms of the GNU General Public Lice
\noindent\\
\ifthenelse{\boolean{isdraft}}{%
 This is a prepublished draft, dated \verdate.%
+ % diagn: the next line has been changed; check it
+ The book was last revised \verdate.%
}{%
Version 1.01 (that is, first edition, first printing), \verdate.%
}
@@ 81,7 +84,7 @@ the terms of the GNU General Public Lice
\mainmatter
\include{intro}
\part{The calculus of a single variable}
+\part{The calculus of a single variable}\label{part10}
\include{alggeo}
\include{trig}
\include{drvtv}
@@ 91,19 +94,19 @@ the terms of the GNU General Public Lice
\include{taylor}
\include{inttx}
\include{cubic}
\part{Matrices and vectors}
+\part{Matrices and vectors}\label{part20}
\include{matrix}
\include{gjrank}
\include{mtxinv}
\include{eigen}
\include{vector}
\include{vcalc}
\part{Transforms and special functions}
+\part{Transforms and special functions}\label{part30}
\include{fours}
\include{fouri}
\include{specf}
\include{prob}
\include{stub}
+\include{conclu}
\appendix
\cleardoublepage\addcontentsline{toc}{part}{Appendices}
diff pruN 0.53.201204142/tex/Makefile 0.56.20180123.12/tex/Makefile
 0.53.201204142/tex/Makefile 20120415 16:06:10.000000000 +0000
+++ 0.56.20180123.12/tex/Makefile 20170509 14:01:15.000000000 +0000
@@ 164,7 +164,7 @@ $(out).dvi : $(main) $(def) $(cls) $(ch)
$(cls) :
sed >$(cls) $(clsdir)/$(cls0) \
e 's/^\(\\ProvidesClass{\)$(class)\(}\)[[:space:]]*$$/\1$(out)$(class)\2/' \
 e 's/^\($(p1)section{$(p2){1}{\)\([^{}]*\)\(}{\)\([^{}]*\)\(}}\)[[:space:]]*$$/\1\2\3$(l1b)\5/' \
+ e 's/^\($(p1)\(section\figure\){$(p2){1}{\)\([^{}]*\)\(}{\)\([^{}]*\)\(}}\)[[:space:]]*$$/\1\3\4$(l1b)\6/' \
e 's/^\($(p1)subsection{$(p2){2}{\)\([^{}]*\)\(}{\)\([^{}]*\)\(}}\)[[:space:]]*$$/\1$(l2a)\3$(l2b)\5/' \
e 's/^\($(p1)subsubsection{$(p2){3}{\)\([^{}]*\)\(}{\)\([^{}]*\)\(}}\)[[:space:]]*$$/\1$(l3a)\3\4\5/'
diff pruN 0.53.201204142/tex/matrix.tex 0.56.20180123.12/tex/matrix.tex
 0.53.201204142/tex/matrix.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/matrix.tex 20180113 16:10:22.000000000 +0000
@@ 10,15 +10,15 @@
\index{applied mathematics!foundations of}
\index{mathematics!applied, foundations of}
Chapters~\ref{alggeo} through~\ref{inttx} have laid solidly the basic
+Chapters~\ref{alggeo} through~\ref{inttx} have solidly laid the basic
foundations of applied mathematics. This chapter begins to build
on those foundations, demanding some heavier mathematical lifting.
Taken by themselves, most of the foundational methods of the earlier
chapters have handled only one or at most a few numbers (or functions)
at a time. However, in practical applications the need to handle large
arrays of numbers at once arises often. Some nonobvious effects emerge
then, as, for example, the eigenvalue of Ch.~\ref{eigen}.
+arrays of numbers at once arises often. Some nonobvious effects then
+emerge, as for example the eigenvalue of chapter~\ref{eigen}.
\index{matrix!motivation for}
Regarding the eigenvalue: the eigenvalue was always there, but
@@ 177,12 +177,12 @@ mathematics brings nothing else quite li
may be, but exciting they are not. At least, the earlier parts are
not very exciting (later parts are better). As a reasonable
compromise, the veteran seeking more interesting reading might skip
 directly to Chs.~\ref{mtxinv} and~\ref{eigen}, referring back to
 Chs.~\ref{matrix} and \ref{gjrank} as need arises.
+ directly to chapters~\ref{mtxinv} and~\ref{eigen}, referring back to
+ chapters~\ref{matrix} and \ref{gjrank} as need arises.
}
Chapters~\ref{matrix} through~\ref{eigen} treat the matrix and its
algebra. This chapter, Ch.~\ref{matrix}, introduces the rudiments of
+algebra. This chapter, chapter~\ref{matrix}, introduces the rudiments of
the matrix itself.%
\footnote{%
\cite{Beattie}%
@@ 206,7 +206,7 @@ matrix first arises. We begin there.
\subsection{The linear transformation}
\label{matrix:120.10}
\index{linear transformation}
\index{transformation, linear}
+\index{transformation!linear}
\index{index}
Section~\ref{integ:240.05} has introduced the idea of linearity. The
@@ 217,7 +217,7 @@ Section~\ref{integ:240.05} has introduce
transformation, the basis set and the simultaneous system of linear
equationsproving from suitable axioms that the three amount more or
less to the same thing, rather than implicitly assuming the fact. The
 professional approach \cite[Chs.~1 and~2]{Beattie}\cite[Chs.~1, 2
+ professional approach \cite[chapters~1 and~2]{Beattie}\cite[chapters~1, 2
and~5]{Lay} has much to recommend it, but it is not the approach we
will follow here.
}
@@ 258,7 +258,7 @@ where
\]
In general, the operation of a matrix~$A$ is that%
\footnote{
 As observed in Appendix~\ref{greek}, there are unfortunately not
+ As observed in appendix~\ref{greek}, there are unfortunately not
enough distinct Roman and Greek letters available to serve the needs
of higher mathematics. In matrix work, the Roman letters~$ijk$
conventionally serve as indices, but the same letter~$i$ also serves
@@ 368,8 +368,8 @@ elements individually by the scalar:
\bq{matrix:000:45}
[\alpha A]_{ij} = \alpha a_{ij}.
\eq
Evidently multiplication by a scalar is commutative: $\alpha A\ve x =
A\alpha\ve x$.
+Evidently multiplication by a scalar \emph{is} commutative: $\alpha A\ve
+x = A\alpha\ve x$.
Matrix addition works in the way one would expect, element by element;
and as one can see from~(\ref{matrix:000:40}), under multiplication,
@@ 411,7 +411,7 @@ $k$th column of~$B$.
If a matrix multiplying from the right is a column operator, is a matrix
multiplying from the left a \emph{row operator?} Indeed it is. Another
way to write $AX=B$, besides~(\ref{matrix:230:10}), is
+way to write that $AX=B$, besides~(\ref{matrix:230:10}), is
\bq{matrix:230:20}
[B]_{i*} = \sum_{j=1}^{n} a_{ij} [X]_{j*}.
\eq
@@ 570,7 +570,7 @@ The Kronecker equations~(\ref{matrix:150
and~(\ref{matrix:150:sift}) parallel the Dirac
equations~(\ref{integ:670:sift0}) and~(\ref{integ:670:sift}).
Chs.~\ref{matrix} and~\ref{eigen} will find frequent use for the
+Chapters~\ref{matrix} and~\ref{eigen} will find frequent use for the
Kronecker delta. Later, \S~\ref{vector:240.30} will revisit the
Kronecker delta in another light.
@@ 736,7 +736,7 @@ derivations of later sections and chapte
useful to the applied mathematician. The applied mathematical reader
who has never heretofore considered infinite dimensionality in vectors
and matrices would be well served to take the opportunity to do so
 here. As we shall discover in Ch.~\ref{gjrank}, dimensionality is a
+ here. As we shall discover in chapter~\ref{gjrank}, dimensionality is a
poor measure of a matrix's size in any case. What really counts is
not a matrix's $m \times n$ dimensionality but rather its \emph{rank.}
}
@@ 786,8 +786,8 @@ practical purposes in any case. Basical
there's not much else to it.%
\footnote{
Well, of course, there's a lot else to it, when it comes to
 dividing by zero as in Ch.~\ref{drvtv}, or to summing an infinity of
 zeros as in Ch.~\ref{integ}, but those aren't what we were speaking of
+ dividing by zero as in chapter~\ref{drvtv}, or to summing an infinity of
+ zeros as in chapter~\ref{integ}, but those aren't what we were speaking of
here.
}
@@ 1105,7 +1105,7 @@ For such a matrix,~(\ref{matrix:180:35})
The rank$r$ identity matrix~$I_r$ commutes freely past~$C$.
\ei
Evidently big identity matrices commute freely where small ones cannot
(and the general identity matrix $I=I_\infty$ commutes freely past
+(and the general identity matrix $I=I_{\infty}^\infty$ commutes freely past
everything).
\subsection
@@ 1188,7 +1188,7 @@ e_i^T$. Parallel logic naturally applie
\index{elementary operator!inverse of}
Section~\ref{matrix:120.27} has introduced the general row or column operator.
Conventionally denoted~$T$, the \emph{elementary operator} is a
+Denoted~$T$, the \emph{elementary operator} is a
simple extended row or column operator from sequences of
which more complicated extended operators can be built. The elementary
operator~$T$ comes in three kinds.%
@@ 1214,15 +1214,17 @@ operator~$T$ comes in three kinds.%
operator doesn't actually do anything. There exist legitimate
tactical reasons to forbid (as in \S~\ref{matrix:322}),
but normally this book permits.

 It is good to define a concept aesthetically. One should usually
 do so when one can; and indeed in this case one might reasonably
 promote either definition on aesthetic grounds. However, an
 applied mathematician ought not to let a mere definition entangle
 him. What matters is the underlying concept. Where the
 definition does not serve the concept well, the applied
 mathematician considers whether it were not worth the effort to
 adapt the definition accordingly.
+ %
+ % > I don't think that the rest of this footnote is needed;
+ % > delete the rest? THB
+ %It is good to define a concept aesthetically. One should usually
+ %do so when one can; and indeed in this case one might reasonably
+ %promote either definition on aesthetic grounds. However, an
+ %applied mathematician ought not to let a mere definition entangle
+ %him. What matters is the underlying concept. Where the
+ %definition does not serve the concept well, the applied
+ %mathematician considers whether it were not worth the effort to
+ %adapt the definition accordingly.%
}
\item \index{elementary operator!scaling}
\index{scaling operator!elementary}
@@ 1577,13 +1579,13 @@ means the \emph{inverse} of the elementa
T^{1}T = I = TT^{1}.
\]
Matrix inversion is not for elementary operators only, though. Many
more general matrices~$C$ also have inverses such that
+matrices~$C$ that are more general also have inverses such that
\bq{matrix:321:10}
C^{1}C = I = CC^{1}.
\eq
(Do all matrices have such inverses? No. For example, the null matrix
has no such inverse.) The broad question of how to invert a general
matrix~$C$, we leave for Chs.~\ref{gjrank} and~\ref{mtxinv} to address.
+matrix~$C$, we leave for chapters~\ref{gjrank} and~\ref{mtxinv} to address.
For the moment however we should like to observe three simple rules
involving matrix inversion.
@@ 1632,7 +1634,7 @@ Third,
\left( \prod_k C_k \right)^{1} = \coprod_k C_k^{1}.
\eq
This rule emerges upon repeated application of~(\ref{matrix:321:10}),
which yields
+which yields that
\[
\coprod_k C_k^{1} \prod_k C_k = I = \prod_k C_k \coprod_k C_k^{1}.
\]
@@ 1654,14 +1656,14 @@ eqn.~(\ref{matrix:321:60}) too applies f
active region is limited to $r \times r$. (Section~\ref{mtxinv:230} uses
the rank$r$ inverse to solve an exactly determined linear system. This
is a famous way to use the inverse, with which many or most readers will
already be familiar; but before using it so in Ch.~\ref{mtxinv}, we shall
first learn how to compute it reliably in Ch.~\ref{gjrank}.)
+already be familiar; but before using it so in chapter~\ref{mtxinv}, we shall
+first learn how to compute it reliably in chapter~\ref{gjrank}.)
Table~\ref{matrix:321:tbl} summarizes.
\begin{table}
\caption[Matrix inversion properties.]
{Matrix inversion properties.
 (The properties work equally for $C^{1(r)}$ as for~$C^{1}$ if~$A$
+ (The similarity properties work equally for $C^{1(r)}$ as for~$C^{1}$ if~$A$
honors an $r \times r$ active region. The full notation $C^{1(r)}$
for the rank$r$ inverse incidentally is not standard, usually is
not needed, and normally is not used.)}
@@ 1698,6 +1700,9 @@ adjacent pairs), one can achieve any des
can achieve the permutation $3,5,1,4,2$ by interchanging first the~$1$
and~$3$, then the~$2$ and~$5$.
+\index{proper sequence}
+\index{improper sequence}
+\index{sequence!proper or improper}
Now contemplate all possible pairs:
\settowidth\tla{$(0,n)$}
\bqb
@@ 1710,12 +1715,12 @@ Now contemplate all possible pairs:
\er
\eqb
In a given permutation (like $3,5,1,4,2$),
some pairs will appear in correct order with respect to one another,
while others will appear in incorrect order. (In $3,5,1,4,2$, the pair
$[1,2]$ appears in correct order in that the larger~$2$ stands to the right
of the smaller~$1$; but the pair $[1,3]$ appears in incorrect order in
+some pairs will appear in proper sequence with respect to one another,
+while others will appear in improper sequence. (In $3,5,1,4,2$, the pair
+$[1,2]$ appears in proper sequence in that the larger~$2$ stands to the right
+of the smaller~$1$; but the pair $[1,3]$ appears in improper sequence in
that the larger~$3$ stands to the \emph{left} of the smaller~$1$.)
If~$p$ is the number of pairs which appear in incorrect order (in the
+If~$p$ is the number of pairs which appear in improper sequence (in the
example, $p=6$), and if~$p$ is even, then we say that the permutation has
\emph{even} or \emph{positive parity;} if odd, then \emph{odd} or
\emph{negative parity.}%
@@ 1729,7 +1734,7 @@ Now consider: every interchange of adjac
increment or decrement~$p$ by one, reversing parity.
Why? Well, think about it. If two elements are adjacent and
their order is correct, then interchanging falsifies the order, but
only of that pair (no other element interposes, thus the interchange
+only of that pair (no other element interposes, so the interchange
affects the ordering of no other pair). Complementarily, if the order
is incorrect, then interchanging rectifies the order. Either way, an
adjacent interchange alters~$p$ by exactly~$\pm 1$, thus reversing
@@ 1922,7 +1927,7 @@ operators, in this case elementary scali
& \vdots & \vdots & \vdots & \vdots & \vdots & \ddots
}
\eq
(of course it might be that $\alpha_i = 1$, hence that $T_{\alpha_i[i]}
+(of course it might be that $\alpha_i = 1$, and thus that $T_{\alpha_i[i]}
= I$, for some, most or even all~$i$; however, $\alpha_i = 0$ is
forbidden by the definition of the scaling elementary). An example is
\settowidth\tla{\fn$0$}
@@ 2162,7 +2167,7 @@ as in the Schur decomposition of \S~\ref
}
The \emph{strictly triangular matrix} $LI$ or $UI$ is likewise
sometimes of interest, as in Table~\ref{matrix:330:t18}.%
\footnote{\cite[``Schur decomposition,'' 00:32, 30~Aug. 2007]{wikip}}
+\footnote{\cite[``Schur decomposition,'' 00:32, 30~Aug.\ 2007]{wikip}}
However, such matrices cannot in general be expressed as products of
elementary operators and this section does not treat them.
@@ 2189,7 +2194,7 @@ So long as the multiplication is done in
that $(\prod_k A_k)(C)$ applies first~$A_1$, then~$A_2$, $A_3$ and so on,
as row operators to~$C$; whereas $(C)(\coprod_k A_k)$ applies
first~$A_1$, then~$A_2$, $A_3$ and so on, as column operators to~$C$.
 The symbols~$\prod$ and~$\coprod$ as this book uses them can thus be
+ The symbols~$\prod$ and~$\coprod$ as this book uses them thus can be
thought of respectively as row and column sequencers.
}
then conveniently,
@@ 2477,7 +2482,7 @@ matrix's basic layout.
\caption[Properties of the parallel unit triangular matrix.]{
Properties of the parallel unit triangular matrix.
(In the table, the notation~$I_a^b$ represents the generalized
 dimensionlimited indentity matrix or truncator
+ dimensionlimited identity matrix or truncator
of eqn.~\ref{matrix:180:IMC}. Note that the inverses
$L_\^{\{k\}\,\mbox{\scriptsize$1$}} = L_\^{\{k\}'}$ and
$U_\^{\{k\}\,\mbox{\scriptsize$1$}} = U_\^{\{k\}'}$ are parallel
@@ 2704,7 +2709,7 @@ For instance, if~$\ve x$ has three eleme
\]
This is called the \emph{Jacobian derivative,} the \emph{Jacobian
matrix,} or just the \emph{Jacobian.}%
\footnote{\cite[``Jacobian,'' 00:50, 15 Sept. 2007]{wikip}}
+\footnote{\cite[``Jacobian,'' 00:50, 15 Sept.\ 2007]{wikip}}
Each of its columns is the derivative with respect to one element
of~$\ve x$.
@@ 2753,8 +2758,8 @@ and simplifying.
The shift operator of \S~\ref{matrix:340} and the Jacobian derivative of
this section complete the family of matrix rudiments we shall need to
begin to do increasingly interesting things with matrices in
Chs.~\ref{mtxinv} and~\ref{eigen}. Before doing interesting things,
+chapters~\ref{mtxinv} and~\ref{eigen}. Before doing interesting things,
however, we must treat two more foundational matrix matters. The two
are the GaussJordan decomposition and the matter of matrix rank, which
will be the subjects of Ch.~\ref{gjrank}, next.
+will be the subjects of chapter~\ref{gjrank}, next.
diff pruN 0.53.201204142/tex/mtxinv.tex 0.56.20180123.12/tex/mtxinv.tex
 0.53.201204142/tex/mtxinv.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/mtxinv.tex 20180116 13:46:55.000000000 +0000
@@ 6,7 +6,7 @@
\index{orthonormalization}
\index{matrix!inversion of}
The undeniably tedious Chs.~\ref{matrix} and~\ref{gjrank} have piled the
+The undeniably tedious chapters~\ref{matrix} and~\ref{gjrank} have piled the
matrix theory deep while affording scant practical reward. Building
upon the two tedious chapters, this chapter brings the first rewarding
matrix work.
@@ 17,7 +17,7 @@ theory that the matrix afforded any rewa
however it has. Sections~\ref{matrix:120.10} and~\ref{gjrank:340.24}
have already broached%
\footnote{
 The reader who has skipped Ch.~\ref{gjrank} might at least review
+ The reader who has skipped chapter~\ref{gjrank} might at least review
\S~\ref{gjrank:340.24}.
}
the matrix's most basic use, the primary subject of this chapter, to
@@ 31,11 +31,11 @@ characterizes it.
Now, before we go on, we want to confess that such a use alone, on the
surface of itthough interestingmight not have justified the whole
uncomfortable bulk of Chs.~\ref{matrix} and~\ref{gjrank}. We already
+uncomfortable bulk of chapters~\ref{matrix} and~\ref{gjrank}. We already
knew how to solve a simultaneous system of linear scalar equations in
principle without recourse to the formality of a matrix, after all, as
in the last step to derive~(\ref{trig:250:20}) as far back as
Ch.~\ref{trig}. Why should we have suffered two bulky chapters, if only
+chapter~\ref{trig}. Why should we have suffered two bulky chapters, if only
to prepare to do here something we already knew how to do?
The question is a fair one, but admits at least four answers. First,
@@ 48,11 +48,11 @@ system and, moreover, to do so both opti
such overdetermined systems arise commonly in applications. Third, to
solve the linear system neatly is only the primary and most
straightforward use of the matrix, not its only use: the even more
interesting eigenvalue and its incidents await Ch.~\ref{eigen}. Fourth,
+interesting eigenvalue and its incidents await chapter~\ref{eigen}. Fourth,
specific applications aside, one should never underestimate the blunt
practical benefit of reducing an arbitrarily large grid of scalars to a
single symbol~$A$, which one can then manipulate by known algebraic
rules. Most students first learning the matrix have wondered at this
+rules. Most students first learning the matrix have probably wondered at this
stage whether it were worth all the tedium; so, if the reader now
wonders, then he stands in good company. The matrix finally begins to
show its worth here.
@@ 86,7 +86,7 @@ found, each with an $n \times n$ active
such that%
\footnote{
The symbology and associated terminology might disorient a reader who
 had skipped Chs.~\ref{matrix} and~\ref{gjrank}. In this book, the
+ had skipped chapters~\ref{matrix} and~\ref{gjrank}. In this book, the
symbol~$I$ theoretically represents an $\infty \times \infty$ identity
matrix. Outside the $m \times m$ or $n \times n$ square, the
operators~$G_>$ and~$G_<$ each resemble the $\infty \times \infty$
@@ 166,8 +166,10 @@ no trouble here. The factors do exist,
them.
Equation~(\ref{mtxinv:220:20}) features the important matrix~$A^{1}$,
the \emph{rank$n$ inverse} of~$A$. We have not yet much studied the
rank$n$ inverse, but we have defined it in~(\ref{matrix:321:20}),
+the \emph{rank$n$ inverse} of~$A$.
+
+We have not yet much studied the
+rank$n$ inverse, but have at least defined it in~(\ref{matrix:321:20}),
where we gave it the fuller, nonstandard notation $A^{1(n)}$. When
naming the rank$n$ inverse in words one usually says simply, ``the
inverse,'' because the rank is implied by the size of the square active
@@ 186,7 +188,7 @@ and~$A$is, not~$I$, but~$I_n$.
Properties that emerge from~(\ref{mtxinv:220:20}) include the following.
\bi
\item
 Like~$A$, the rank$n$ inverse~$A^{1}$ (more fully written
+ Like~$A$, the rank$n$ inverse~$A^{1}$ (more fully written as
$A^{1(n)}$) too is an $n \times n$ square matrix of full rank $r=n$.
\item
Since~$A$ is square and has full rank (\S~\ref{gjrank:340.25}), its
@@ 201,10 +203,30 @@ Properties that emerge from~(\ref{mtxinv
If $B=A^{1}$ then $B^{1}=A$. That is,~$A$ is itself the rank$n$
inverse of~$A^{1}$. The matrices~$A$ and~$A^{1}$ thus form an
exclusive, reciprocal pair.
+ % The footnote just following is probably unneeded.
+ %\footnote{%
+ % A professional mathematician might more rigorously prove such a
+ % proposition somewhat as follows. Let~$A$ be an $n \times n$
+ % matrix of full rank. Suppose two $n \times n$ matrices~$B_1$
+ % and~$B_2$ such that $B_1A = I_n$ and $B_2A = I_n$. Subtracting
+ % the one equation from the other yields that $[B_2B_1]A = 0$,
+ % which has each row of the $n \times n$ matrix $[B_2B_1]$ to
+ % represent a null linear combination of the rows of~$A$. However,
+ % \S~\ref{gjrank:340.25} has shown that a fullrank square matrix
+ % like~$A$ has independent rows, which according to the definition
+ % of \emph{independence} in \S~\ref{gjrank:335} implies that each
+ % row of $[B_2B_1]$ is null. That is, $[B_2B_1]=0$ and thus
+ % $B_2=B_1$.
+ %
+ % Judging such a proposition to be sufficiently obvious on the face
+ % of it, though, this book seldom bothers with such additional
+ % rigor.%
+ %}
\item
If~$B$ is an $n \times n$ square matrix and either
$BA = I_n$ or $AB = I_n$, then both equalities in fact hold; thus,
 $B = A^{1}$. One can have neither equality without the other.
+ $B = A^{1}$. One can have neither equality without the
+ other.
\item
Only a square, $n \times n$ matrix of full rank $r=n$ has a rank$n$
inverse. A matrix~$A'$ which is not square, or whose rank falls
@@ 258,7 +280,7 @@ and~\ref{gjrank:340.20} have shown, so i
of~(\ref{mtxinv:220:20}) such a matrix has no inverse; for, if it had,
then~$A'^{-1}$ would by definition represent a row or column operation
which impossibly promoted~$A'$ to the full rank $r=n$ of~$I_n$. Indeed,
in that it has no inverse such a degenerate matrix closely resembles
+in that it has no inverse such a degenerate matrix resembles
the scalar~$0$, which has no reciprocal. Mathematical convention owns a
special name for a square matrix which is degenerate and thus has no
inverse; it calls it a \emph{singular} matrix.
@@ 310,22 +332,13 @@ coefficients,~(\ref{mtxinv:230:15}) conc
system of~$n$ linear scalar equations in~$n$ scalar unknowns. It is the
classic motivational result of matrix theory.
%\footnote{
% Recent English usage has almost forgotten the right meaning of the
% useful word \emph{apology.} Properly used, as here, the word refers
% to ``something said or written in defense or justification of
% what appears to others wrong, or of what may be liable to
% disapprobation''~\cite{Webster1913}. So used, the word does not imply
% regret.
%}

\index{theory}
It has taken the book two long chapters to reach~(\ref{mtxinv:230:15}).
If one omits first to prepare the theoretical ground sufficiently to
support more advanced matrix work, then one can indeed
reach~(\ref{mtxinv:230:15}) with rather less effort than the book has
done.%
\footnote{
+\footnote{%
For motivational reasons, introductory, tutorial linear algebra
textbooks like~\cite{Hefferon} and~\cite{Lay} rightly yet invariably
invert the general square matrix of full rank much earlier,
@@ 334,13 +347,13 @@ done.%
twofold. First, the student fails to develop the GaussJordan
decomposition properly, instead learning the less elegant but easier
to grasp ``row echelon form'' of ``Gaussian elimination''
 \cite[Ch.~1]{Hefferon}\cite[\S~1.2]{Lay}which makes good
+ \cite[chapter~1]{Hefferon}\cite[\S~1.2]{Lay}which makes good
matrixarithmetic drill but leaves the student imperfectly prepared
when the time comes to study kernels and eigensolutions or to read and
write matrixhandling computer code. Second, in the long run the
tutorials save no effort, because the student still must at some point
develop the theory underlying matrix rank and supporting each of the
 several coincident properties of \S~\ref{eigen:370}. What the
+ several co\"incident properties of \S~\ref{eigen:370}. What the
tutorials do is pedagogically necessaryit is how the writer first
learned the matrix and probably how the reader first learned it,
toobut it is appropriate to a tutorial, not to a study reference
@@ 348,12 +361,9 @@ done.%
In this book, where derivations prevail, the proper place to invert
the general square matrix of full rank is here. Indeed, the inversion
 here goes smoothly, because Chs.~\ref{matrix} and~\ref{gjrank} have
+ here goes smoothly, because chapters~\ref{matrix} and~\ref{gjrank} have
laid under it a firm foundation upon whichand supplied it the right
 tools with whichto work.
 % (This writer usually suspects novelty in English style, but is not
 % the emdash a fine advance in punctuation? If only Gibbon had had
 % it to use.)
+ tools with whichto work.%
}
As the chapter's introduction has observed, however, we shall soon meet
additional interesting applications of the matrix which in any case
@@ 374,7 +384,7 @@ linear system described by a singular sq
a good approximate solution is given by the \emph{pseudoinverse} of
\S~\ref{mtxinv:320}. In the language of \S~\ref{gjrank:340.24},
the singular square matrix characterizes a system that is both
underdetermined and overdetermined, thus degenerate.
+underdetermined and overdetermined, and thus degenerate.
% 
@@ 452,6 +462,7 @@ The \emph{GaussJordan kernel formula}%
for~(\ref{mtxinv:245:kernel}). This name seems as fitting as any.
}
\bq{mtxinv:245:kernel}
+ % I have checked this by computer for a 4x5 test case. THB
 A^K = S^{-1}K^{-1}H_rI_{n-r} = G_<^{-1}H_rI_{n-r}
\eq
gives a complete kernel~$A^K$ of~$A$, where~$S^{1}$, $K^{1}$
@@ 509,7 +520,7 @@ $(I_nI_r)S\ve x$, which is the remainin
Notice how we now associate the factor $(I_nI_r)$ rightward as a row
truncator, though it had first entered acting leftward as a column
truncator. The flexibility to reassociate operators in such a way
 is one of many good reasons Chs.~\ref{matrix} and~\ref{gjrank} have
+ is one of many good reasons chapters~\ref{matrix} and~\ref{gjrank} have
gone to such considerable trouble to develop the basic theory of the
matrix.
}
@@ 567,7 +578,8 @@ For all the~$\ve e_j$ at once,
\[
 S\ve x(I_{n-r}, 0) = (I - I_rK)H_rI_{n-r}.
\]
But if all the~$\ve e_j$ at once, the columns of~$I_{nr}$, exactly
+But if all the~$\ve e_j$ at once---that is, if all
+the columns of~$I_{n-r}$---exactly
address the domain of~$\ve a$, then the columns of $\ve x(I_{nr}, 0)$
likewise exactly address the range of $\ve x(\ve a, 0)$.
Equation~(\ref{mtxinv:245:10}) has already named this range~$A^K$,
@@ 604,8 +616,11 @@ by which%
\bq{mtxinv:245:41}
 SA^K = (I-I_rK)H_rI_{n-r}.
\eq
Leftmultiplying by $S^{1}=S^{*}=S^{T}$ produces the alternate kernel
formula
+Leftmultiplying by
+\bq{mtxinv:245:415}
+ S^{-1}=S^{*}=S^{T}
+\eq
+produces the alternate kernel formula
\bq{mtxinv:245:42}
 A^K = S^{-1}(I-I_rK)H_rI_{n-r}.
\eq
@@ 661,7 +676,7 @@ and canceling terms, we have that
of $K$'s interesting content lies by construction right of its $r$th
column). Now we have enough to go on with.
Substituting~(\ref{mtxinv:245:35}) and~(\ref{mtxinv:245:36})
into~(\ref{mtxinv:245:33}) yields
+into~(\ref{mtxinv:245:33}) yields that
\[
SA^K = [(I_nK^{1}I_r)  (IK^{1})]H_r.
\]
@@ 897,7 +912,7 @@ family.
\index{homogeneous solution}
\index{driving vector}
\index{vector, driving}
+\index{vector!driving}
\index{split form}
The nonoverdetermined linear system~(\ref{mtxinv:240:05}) by definition
admits more than one solution~$\ve x$ for a given driving vector~$\ve
@@ 912,7 +927,7 @@ split the system as
\eq
which, when the second line is added to the first and the third is
substituted, makes the whole form~(\ref{mtxinv:240:05}). Splitting the
system does not change it, but it does let us treat the system's first
+system does not change it but does let us treat the system's first
and second lines in~(\ref{mtxinv:240:20}) separately. In the split
form, the symbol~$\ve x_1$ represents any one $n$element vector that
happens to satisfy the form's first linemany are possible; the
@@ 978,28 +993,85 @@ That is,
\index{nonoverdetermined linear system!general solution of}
Assembling~(\ref{mtxinv:245:kernel}), (\ref{mtxinv:240:20})
and~(\ref{mtxinv:240:25}) yields the general solution
+and~(\ref{mtxinv:240:25}) in light of~(\ref{gjrank:341:GJinv})
+yields the general solution
\bq{mtxinv:240:50}
\ve x = S^{1}(G_>^{1}\ve b + K^{1}H_rI_{nr} \ve a)
\eq
to the nonoverdetermined linear system~(\ref{mtxinv:240:05}).
\index{arithmetic!exact}
+\index{arithmetic!exact and inexact}
\index{exact arithmetic}
+\index{inexact arithmetic}
+In exact arithmetic~(\ref{mtxinv:240:50}) solves the nonoverdetermined
+linear system in theory exactly. Therefore~(\ref{mtxinv:240:50})
+properly concludes the section. Nevertheless, one should like to add a
+significant practical observation regarding \emph{inexact arithmetic}
+as follows.
+
\index{rounding error}
\index{error!due to rounding}
\index{pivot!small}
\index{matrix!large}
In exact arithmetic~(\ref{mtxinv:240:50}) solves the nonoverdetermined
linear system in theory exactly. Of course, practical calculations are
usually done in limited precision, in which compounded rounding error in
the last bit eventually disrupts~(\ref{mtxinv:240:50}) for matrices
+\index{register, computer's floatingpoint}
+\index{computer register}
+\index{floatingpoint register}
+\index{library, programmer's}
+\index{computer memory}
+\index{memory, computer}
+\index{mantissa}
+Practical calculations are usually done in inexact arithmetic insofar as
+they are done in the limited precision of a computer's floatingpoint
+registers. Exceptions are possibleexactarithmetic libraries are
+available for a programmer to callbut exactarithmetic libraries are
+slow and memoryintensive and, for this reason among others, are only
+occasionally used in practice. When they are not used,
+compounded rounding error in a floatingpoint register's last
+bit of mantissa\footnote{%
+ % diagn: this footnote, substantially rewritten, wants review
+ \index{floatingpoint exponent}%
+ \index{exponent!floatingpoint}%
+ \index{floatingpoint infinity}%
+ \index{infinity!floatingpoint}%
+ \index{floatingpoint zero}%
+ \index{zero!floatingpoint}%
+ \index{bit}%
+ What is a \emph{mantissa?}
+ Illustration: in the number $1.65\times 10^6$,
+ the mantissa is 1.65. However, computers do it in binary rather
+ than in decimal, typically with fiftytwo ($\text{0x34}$) stored
+ bits of mantissa not counting
+ the leading bit which, in binary, is not stored because
+ it is always~1. (There exist implementational details like
+ floatingpoint ``denormals'' which might seem pedantically to
+ contradict the always1 rule, but that is a
+ computerscience technicality which is hardly interesting even in
+ computer science and is uninteresting in the context of the present
+ discussion. What might be interesting in the present context
+ is this: a standard doubleprecision floatingpoint representation
+ hasbesides fiftytwo bits of
+ mantissaalso eleven, $\text{0xB}$,
+ bits for an exponent and one bit for a sign. The
+ smallest positive number representable without denormalization
+ is~$2^{-\text{0x3FE}}$; the
+ largest is
+ \mbox{$\text{0x1.FFFF\,FFFF\,FFFF\,F}\times 2^{\text{0x3FF}}$},
+ just less than~$2^{\text{0x400}}$. If the code for a
+ full~$2^{\text{0x400}}$ is entered, then that is held to
+ represent infinity. Similarly, the code for~$2^{-\text{0x3FF}}$
+ represents zero. If you think that the code for~$2^{-\text{0x400}}$
+ should instead represent zero, no such code can actually be entered,
+ for the exponent's representation is offset by one; and there are many
+ other details beyond the book's scope.)}
+eventually disrupts~(\ref{mtxinv:240:50}) for matrices
larger than some moderately large size. Avoiding unduly small pivots
early in the GaussJordan extends~(\ref{mtxinv:240:50})'s reach to larger
matrices, and for yet larger matrices a bewildering variety of more
sophisticated techniques exists to mitigate the problem, which can be
vexing because the problem arises even when the matrix~$A$ is exactly
known. Equation~(\ref{mtxinv:240:50}) is useful and correct, but one
+known.
+
+Equation~(\ref{mtxinv:240:50}) is thus useful and correct, but one
should at least be aware that it can in practice lose floatingpoint
accuracy when the matrix it attacks grows too large. (It can also lose
accuracy when the matrix's rows are almost dependent, but that is more
@@ 1040,7 +1112,7 @@ develop the mathematics to handle the ov
The quantity%
\footnote{\label{mtxinv:315:95}%
Alas, the alphabet has only so many letters (see
 Appendix~\ref{greek}). The~$\ve r$ here is unrelated to matrix
+ appendix~\ref{greek}). The~$\ve r$ here is unrelated to matrix
rank~$r$.
}$\mbox{}^,$%
\footnote{
@@ 1069,7 +1141,7 @@ regard the candidate solution~$\ve x$.
\label{mtxinv:320}
\index{pseudoinverse}
\index{MoorePenrose pseudoinverse}
\index{Moore, E.H. (18621932)}
+\index{Moore, E.~H.\ (18621932)}
\index{Penrose, Roger (1931)}
\index{least squares}
\index{squares, least}
@@ 1170,7 +1242,7 @@ resulting line will predict future produ
\index{linear system!overdetermined}
\index{overdetermined linear system}
That is all mathematically irreproachable. By the fifth Saturday
+The foregoing is all mathematically irreproachable. By the fifth Saturday
however we shall have gathered more production data, plotted on the
figure's right, to which we should like to fit a better line to predict
production more accurately.
@@ 1530,7 +1602,7 @@ Each step in the present paragraph is re
\footnote{
The paragraph might inscrutably but logically instead have ordered the
steps in reverse as in \S\S~\ref{noth:420.20} and~\ref{inttx:250}.
 See Ch.~\ref{noth}'s footnote~\ref{noth:420:85}.
+ See chapter~\ref{noth}'s footnote~\ref{noth:420:85}.
}
so the assertion in the last form is logically equivalent to the
conjecture's first point, with which the paragraph began. Moreover, the
@@ 1676,9 +1748,9 @@ $\tilde{\ve f}_k(\ve x_{k+1}) = 0$:
\]
Solving for $\ve x_{k+1}$ (approximately if necessary), we have that
\bq{mtxinv:NR}
 \ve x_{k+1} = \left. \ve x 
+ \ve x_{k+1} = \left\{ \ve x 
\left[\frac{d}{d\ve x}\ve f(\ve x)\right]^\dagger
 \ve f(\ve x) \right_{\ve x=\ve x_k},
+ \ve f(\ve x) \right\}_{\ve x=\ve x_k},
\eq
where~$[\cdot]^\dagger$ is the MoorePenrose pseudoinverse of
\S~\ref{mtxinv:320}which is just the ordinary inverse~$[\cdot]^{1}$
@@ 1755,12 +1827,13 @@ product,}%
which of~$\ve a$ and~$\ve b$ is conjugated depends on the author.
Most recently, at least in the author's country, the usage $\langle
\ve a, \ve b \rangle \equiv \ve a^{*} \cdot \ve b$ seems
 to be emerging as standard where the dot is not used
 \cite[\S~3.1]{Beattie}\cite[Ch.~4]{Franklin}.
 This book prefers the dot.
+ to be emerging as standard where the dot is not used, as in
+ \cite[\S~3.1]{Beattie}\cite[chapter~4]{Franklin} (but slightly
+ contrary to \cite[\S~2.1]{HFDavis}, for example). At any rate, this
+ book prefers the dot.
}
is the product of the two vectors to the extent to which they run in the
same direction. It is written
+same direction. It is written as
\[
\ve a \cdot \ve b.
\]
@@ 1773,9 +1846,10 @@ In general,
\]
But if the dot product is to mean anything, it must be that
\bq{mtxinv:445:05}
 \ve e_i \cdot \ve e_j = \delta_{ij}.
+ \ve e_i \cdot \ve e_j = \delta_{ij},
\eq
Therefore,
+where the Kronecker delta~$\delta_{ij}$ is as defined in
+\S~\ref{matrix:150}. Therefore,
\[
\ve a \cdot \ve b =
a_1b_1 + a_2b_2 + \cdots + a_nb_n;
@@ 1850,10 +1924,10 @@ have more than three elements each.
% 
\section{The complex vector triangle inequalities}
+\section{Complex vector inequalities}
\label{mtxinv:447}
\index{triangle inequalities!complex vector}
\index{triangle inequalities!vector}
+\index{inequalities!complex vector}
+\index{inequalities!vector}
The triangle inequalities~(\ref{alggeo:323:20})
and~(\ref{trig:278:triangle2}) lead one to hypothesize generally that
@@ 1862,92 +1936,150 @@ and~(\ref{trig:278:triangle2}) lead one
+ \ve b\right| \le \left|\ve a\right| + \left|\ve b\right|
\eq
for any complex, $n$dimensional vectors~$\ve a$ and~$\ve b$.
+Section~\ref{mtxinv:447.30} will prove
+% bad break
+(\ref{mtxinv:447:10}); but first, \S~\ref{mtxinv:447.20} develops
+a related inequality by Schwarz.
\index{cleverness}
The proof of the sum hypothesis that $\left\ve a
+ \ve b\right \le \left\ve a\right + \left\ve b\right$ is by
contradiction. We suppose falsely that
\[
 \left\ve a + \ve b\right > \left\ve a\right + \left\ve b\right.
\]
Squaring and using~(\ref{mtxinv:445:dotmag}),
+\subsection{The Schwarz inequality}
+\label{mtxinv:447.20}
+\index{inequality!Schwarz}
+\index{Schwarz inequality}
+\index{CauchySchwarz inequality}
+\index{Schwarz, Hermann (18431921)}
+
+The \emph{Schwarz inequality,} alternately the \emph{CauchySchwarz
+inequality,}\footnote{%
+ Pronounced as ``Schwartz,'' almost as ``{S}{c}{h}{w}{o}{r}{t}{z}.''
+ You can sound out the German~\emph{w} like an English~\emph{v} if you
+ wish. The other name being French is pronounced as
+ ``Co{s}{h}{e}{e},'' preferably with little stress butto the extent
+ necessary while speaking Englishwith stress laid on the first
+ syllable.
+ %
+ %And why isn't it Schwartz, like Leibniz is Leibnitz? Various
+ %reasons, too boring to explain here.%
+}
+has that
+\bq{mtxinv:schwarz}
+ |\ve a^{*} \cdot \ve b| \le |\ve a| |\ve b|.
+\eq
+Roughly in words: the dot product does not exceed the product of
+lengths.
+
+If the threedimensional geometrical vectors with their dot products of
+chapters~\ref{trig} and~\ref{vector} are already familiar to you
+then~(\ref{mtxinv:schwarz}) might seem too obvious to bother proving.
+The present chapter however brings an arbitrary number~$n$ of
+dimensions. Furthermore, elements in any or every dimension can be complex.
+Therefore, the geometry is not so easy to visualize in the general case.
+One would prefer an algebraic proof.\footnote{See~\cite{Wu/Wu}
+and~\cite[\S~2.1]{HFDavis} for various other proofs, one of which partly
+resembles the proof given here. See also \cite[``CauchySchwarz
+inequality,'' 17:56, 22~May 2017]{wikip}.}
+
+\index{symmetry}
+\index{asymmetry}
+\index{index!swapping of}
+\index{swapping of indices}
+The proof is by contradiction. We suppose falsely that
\[
 (\ve a + \ve b)^{*} \cdot (\ve a + \ve b)
 > \ve a^{*}\cdot\ve a + 2\left\ve a\right\left\ve b\right + \ve b^{*}\cdot\ve b.
+ |\ve a^{*} \cdot \ve b| > |\ve a| |\ve b|.
\]
Distributing factors and canceling like terms,
+Squaring and using~(\ref{alggeo:225:24}) and~(\ref{mtxinv:445:dotmag}),
\[
 \ve a^{*}\cdot\ve b + \ve b^{*}\cdot\ve a
 > 2\left\ve a\right\left\ve b\right.
+ (\ve a^{*} \cdot \ve b)(\ve b^{*} \cdot \ve a)
+ > (\ve a^{*} \cdot \ve a)(\ve b^{*} \cdot \ve b),
\]
Splitting~$\ve a$ and~$\ve b$ each into real and imaginary parts on the
inequality's left side and then halving both sides,
\[
 \Re(\ve a)\cdot\Re(\ve b) + \Im(\ve a)\cdot\Im(\ve b)
 > \left\ve a\right\left\ve b\right.
\]
Defining the new, $2n$dimensional \emph{real} vectors
\[
 \ve f \equiv \mf{c}{
 \Re{(\ve a_1)} \\
 \Im{(\ve a_1)} \\
 \Re{(\ve a_2)} \\
 \Im{(\ve a_2)} \\
 \vdots \\
 \Re{(\ve a_n)} \\
 \Im{(\ve a_n)}
 }, \ \
 \ve g \equiv \mf{c}{
 \Re{(\ve b_1)} \\
 \Im{(\ve b_1)} \\
 \Re{(\ve b_2)} \\
 \Im{(\ve b_2)} \\
 \vdots \\
 \Re{(\ve b_n)} \\
 \Im{(\ve b_n)}
 },
\]
we make the inequality to be
+or in other words,
\[
 \ve f \cdot \ve g > \left\ve f\right\left\ve g\right,
+ \sum_{i,j} a_i^{*}b_i^{\mbox{}}b_j^{*}a_j^{\mbox{}}
+ > \sum_{i,j} a_i^{*}a_i^{\mbox{}}b_j^{*}b_j^{\mbox{}},
\]
in which we observe that the left side must be positive because the
right side is nonnegative. (This naturally is impossible for any case in
which $\ve f = 0$ or $\ve g = 0$, among others, but wishing to establish
impossibility for all cases we pretend not to notice and continue
reasoning as follows.) Squaring again,
\[
 (\ve f \cdot \ve g)^2 > (\ve f \cdot \ve f)(\ve g \cdot \ve g);
+wherein each side of the inequality is realvalued by construction (that
+is, each side is realvalued because we had started with a real
+inequality anddespite that elements on either side may be
+complexno step since the start has made either side of the inequality
+complex as a whole). One would like to segregate conjugated elements
+for separate handling; it is not easy to see how to segregate them all
+at once but to reorder factors as
+\[
+ \sum_{i,j} \left[(a_ib_j)^{*}(b_ia_j)\right]
+ > \sum_{i,j} \left[(a_ib_j)^{*}(a_ib_j)\right]
+\]
+at least makes a step in the right direction. The last inequality is
+unhelpfully asymmetric, though, so we swap indices $i\lra j$ to write
+the same inequality as that
+\[
+ \sum_{i,j} \left[(b_ia_j)^{*}(a_ib_j)\right]
+ > \sum_{i,j} \left[(b_ia_j)^{*}(b_ia_j)\right].
+\]
+The swapped inequality is asymmetric too but one can add it to the
+earlier, unswapped inequality to achieve the symmetric form
+\[
+ \sum_{i,j} \left[(a_ib_j)^{*}(b_ia_j)
+ + (b_ia_j)^{*}(a_ib_j)\right]
+ > \sum_{i,j} \left[(a_ib_j)^{*}(a_ib_j)
+ +(b_ia_j)^{*}(b_ia_j)\right].
+\]
+Does this help? Indeed it does. Transferring all terms to the
+inequality's right side,
+\[
+ 0 > \sum_{i,j} \left[
+ (a_ib_j)^{*}(a_ib_j) +(b_ia_j)^{*}(b_ia_j)
+  (a_ib_j)^{*}(b_ia_j)  (b_ia_j)^{*}(a_ib_j)
+ \right],
\]
or, in other words,
+Factoring,
\[
 \sum_{i,j} f_ig_if_jg_j > \sum_{i,j} f_i^2g_j^2.
\]
Reordering factors,
+ 0 > \sum_{i,j} \left[
+ (a_ib_j-b_ia_j)^{*}
+ (a_ib_j-b_ia_j)
+ \right] = \sum_{i,j} \left| a_ib_j-b_ia_j \right|^2,
+\]
+which inequality is impossible because $0 \le \mbox{$\cdot$}^2$ regardless of
+what the~\mbox{$\cdot$} might be. The contradiction proves false the
+assumption that gave rise to it, thus establishing the Schwarz
+inequality of~(\ref{mtxinv:schwarz}).
+
+\subsection{Triangle inequalities}
+\label{mtxinv:447.30}
+\index{triangle inequalities!complex vector}
+\index{triangle inequalities!vector}
+
+The proof of the sum hypothesis~(\ref{mtxinv:447:10}) that $\left|\ve a
++ \ve b\right| \le \left|\ve a\right| + \left|\ve b\right|$ is again by
+contradiction. We suppose falsely that
\[
 \sum_{i,j} [(f_ig_j)(g_if_j)] > \sum_{i,j} (f_ig_j)^2.
+ \left|\ve a + \ve b\right| > \left|\ve a\right| + \left|\ve b\right|.
\]
Subtracting $\sum_i (f_ig_i)^2$ from each side,
+Squaring and using~(\ref{mtxinv:445:dotmag}),
\[
 \sum_{i \neq j} [(f_ig_j)(g_if_j)] > \sum_{i \neq j} (f_ig_j)^2,
+ (\ve a + \ve b)^{*} \cdot (\ve a + \ve b)
+ > \ve a^{*}\cdot\ve a + 2\left|\ve a\right|\left|\ve b\right| + \ve b^{*}\cdot\ve b.
\]
which we can cleverly rewrite in the form
+Distributing factors and canceling like terms,
\[
 \sum_{i < j} [2(f_ig_j)(g_if_j)] > \sum_{i < j} [(f_ig_j)^2 + (g_if_j)^2],
+ \ve a^{*}\cdot\ve b + \ve b^{*}\cdot\ve a
+ > 2\left|\ve a\right|\left|\ve b\right|,
\]
where $\sum_{i \sum_{i < j} [(f_ig_j)^2 + 2(f_ig_j)(g_if_j) + (g_if_j)^2].
+ 2\Re\left({\ve a^{*}\cdot\ve b}\right) > 2\left|\ve a\right|\left|\ve b\right|.
\]
This is
+However, the real part of the Schwarz inequality~(\ref{mtxinv:schwarz}) has that
\[
 0 > \sum_{i < j} [f_ig_j + g_if_j]^2,
+ \Re\left({\ve a^{*}\cdot\ve b}\right)
+ \le \left|{\ve a^{*}\cdot\ve b}\right|
+ \le \left|\ve a\right|\left|\ve b\right|,
\]
which, since we have constructed the vectors~$\ve f$ and~$\ve g$ to have
real elements only, is impossible in all cases. The contradiction
proves false the assumption that gave rise to it, thus establishing the
sum hypothesis of~(\ref{mtxinv:447:10}).
+which, when doubled, contradicts the last finding.
+The contradiction proves false the
+assumption that gave rise to it, thus establishing the sum hypothesis
+of~(\ref{mtxinv:447:10}).
The difference hypothesis that $\left|\ve a\right|-\left|\ve b\right|
\le \left|\ve a + \ve b\right|$ is established by defining a vector $\ve
@@ 1955,8 +2087,7 @@ c$ such that
\[
\ve a + \ve b + \ve c = 0,
\]
whereupon according to the sum hypothesis (which we have already
established),
+whereupon according to the sum hypothesis
\[
\begin{split}
 \left|\ve a + \ve c\right| &\le \left|\ve a\right| + \left|\ve c\right|, \\
@@ 1971,7 +2102,12 @@ That is,
\end{split}
\]
which is the difference hypothesis in disguise. This completes the proof
of~(\ref{mtxinv:447:10}).
+of the triangle inequalities~(\ref{mtxinv:447:10}).
+
+\index{Minkowski, Hermann (18641909)}
+\index{Minkowski inequality}
+The triangle sum inequality is alternately called the \emph{Minkowski
+inequality.}\footnote{\cite[\S~2.1]{HFDavis}}
As in \S~\ref{trig:278}, here too we can extend the sum inequality to
the even more general form
@@ 2004,8 +2140,8 @@ The $m \times (mr)$ kernel (\S~\ref{mtx
\eq
is an interesting matrix. By definition of the kernel, the columns
of~$A^{*K}$ are the independent vectors~$\ve u_j$ for which $A^{*}\ve
u_j = 0$, whichinasmuch as the rows of~$A^{*}$ are the adjoints of
the \emph{columns} of~$A$is possible only when each~$\ve u_j$ lies
+u_j = 0$, whichinasmuch as the \emph{rows} of~$A^{*}$ are the adjoints of
+the columns of~$A$is possible only when each~$\ve u_j$ lies
orthogonal to every column of~$A$. This says that the columns of
$A^\perp \equiv A^{*K}$ address the complete space of vectors that lie
orthogonal to $A$'s columns, such that
@@ 2050,17 +2186,17 @@ $A^K=[3\;4\;5;\mbox{$1$}\;1\;0]^T$ to r
are not mistaken arbitrarily to rescale each column of my~$A^K$ by
a separate nonzero factor, instead for instance representing the same
kernel as $A^K=[6\;8\;\mbox{0xA};\frac 1 7\;\mbox{$\frac 1 7$}\;0]^T$.
Kernel vectors have no inherent scale. Style generally asks the applied
mathematician to remove the false appearance of scale by
+Kernel vectors have no inherent scale. Style generally asks one
+to remove the false appearance of scale by
using~(\ref{mtxinv:445:20}) \emph{to normalize} the columns of a kernel
matrix to unit magnitude before reporting them. The same goes for the
eigenvectors of Ch.~\ref{eigen} to come.
+eigenvectors of chapter~\ref{eigen} to come.
\index{orthogonalization}
\index{vector!orthogonalization of}
Where a kernel matrix~$A^K$ has two or more columns (or a repeated
eigenvalue has two or more eigenvectors), style generally asks the
applied mathematician not only to normalize but also \emph{to
+eigenvalue has two or more eigenvectors), style generally asks one
+not only to normalize but also \emph{to
orthogonalize} the columns before reporting them. One orthogonalizes a
vector~$\ve b$ with respect to a vector~$\ve a$ by subtracting from~$\ve
b$ a multiple of~$\ve a$ such that
@@ 2072,7 +2208,7 @@ b$ a multiple of~$\ve a$ such that
\]
where the symbol~$\ve b_\perp$ represents the orthogonalized vector.
Substituting the second of these equations into the first and solving
for~$\beta$ yields
+for~$\beta$ yields that
\[
\beta = \frac{\ve a^{*} \cdot \ve b}{\ve a^{*} \cdot \ve a}.
\]
@@ 2096,7 +2232,7 @@ or, in matrix notation,
\ve b_\perp = \ve b 
\vu a ( \vu a^{*} )( \ve b ).
\]
This is arguably better written
+This is arguably better written,
\bq{mtxinv:460:35}
\ve b_\perp = \left[I  (\vu a)(\vu a^{*})\right]\ve b
\eq
@@ 2104,7 +2240,7 @@ This is arguably better written
than the scalar $[\vu a^{*}][\vu a]$).
One \emph{orthonormalizes} a set of vectors by orthogonalizing them with
respect to one another, then by normalizing each of them to unit magnitude.
+respect to one another and then normalizing each of them to unit magnitude.
The procedure to orthonormalize several vectors
\[
\left\{ \ve x_1,\ve x_2,\ve x_3,\ldots,\ve x_n\right\}
@@ 2190,7 +2326,7 @@ it repeatedly overwritesand, actually
that much, working rather in the memory space it has already reserved
for~$\vu x_{j\perp}$.)%
\footnote{
 \cite[``GramSchmidt process,'' 04:48, 11~Aug. 2007]{wikip}
+ \cite[``GramSchmidt process,'' 04:48, 11~Aug.\ 2007]{wikip}
}
Other equations one algorithmizes can likewise benefit from thoughtful
@@ 2420,14 +2556,14 @@ The algorithm, in detail:
Gram\newline Schmidt}
\index{decomposition!differences between the
Gram\newline Schmidt and GaussJordan}
Though the GramSchmidt algorithm broadly resembles the
% bad break
Gauss\linebreak Jordan,
+\noindent
+Though the GramSchmidt algorithm broadly resembles the GaussJordan,
at least two significant differences stand out: (i)~the GramSchmidt is
onesided because it operates only on the columns of~$\tilde Q$, never
on the rows; (ii)~since $Q$ is itself dimensionlimited, the
GramSchmidt decomposition~(\ref{mtxinv:QR}) needs and has no explicit
factor~$I_r$.
+GramSchmidt decomposition
+% bad break
+(\ref{mtxinv:QR}) needs and has no explicit factor~$I_r$.
\index{GramSchmidt decomposition!factor~$S$ of}
As in \S~\ref{gjrank:340.60}, here also one sometimes prefers that
@@ 2462,42 +2598,41 @@ factors is the $m \times r$ orthonormali
orthonormal columns address the same space the columns of~$A$ themselves
address. If~$Q$ reaches the maximum possible rank $r=m$, achieving
square, $m \times m$ shape, then it becomes a \emph{unitary
matrix}the subject of \S~\ref{mtxinv:465}. Before treating the
unitary matrix, however, let us pause to extract a kernel from the
GramSchmidt decomposition in \S~\ref{mtxinv:460.40}, next.
+matrix}the subject of \S~\ref{mtxinv:465}.
\subsection{The GramSchmidt kernel formula}
+Before treating the unitary matrix, however, let us pause to develop the
+orthogonal complement by GramSchmidt in \S~\ref{mtxinv:460.40}, next.
+
+\subsection{The orthogonal complement by GramSchmidt}
\label{mtxinv:460.40}
\index{GramSchmidt kernel formula}
\index{kernel!GramSchmidt formula for}
+\index{GramSchmidt orthogonal complement}
+\index{orthogonal complement!by GramSchmidt}
Like the GaussJordan decomposition in~(\ref{mtxinv:245:kernel}), the
GramSchmidt decomposition too brings a kernel formula. To develop and
apply it, one decomposes an $m \times n$ matrix
+Having decomposed an $m \times n$ matrix as
\bq{mtxinv:460:74}
 A=QR
+ A=QR=(QI_r)R,
\eq
per the GramSchmidt~(\ref{mtxinv:QR}) and its algorithm in
\S~\ref{mtxinv:460.30}. Observing that the~$r$ independent columns of
the $m \times r$ matrix~$Q$ address the same space the columns
of~$A$ address, one then constructs the $m \times (r+m)$ matrix
+observing that the~$r$ independent columns of the $m \times r$
+matrix~$Q=QI_r$ address the same space the columns of~$A$ address,
+GramSchmidt computes an orthogonal complement~(\ref{mtxinv:450:perp})
+by constructing the $m \times (r+m)$ matrix
\bq{mtxinv:460:76}
 A' \equiv Q + I_mH_{r} = \left[ \br{cc}Q&I_m\er \right]
+ A' \equiv QI_r + I_mH_{-r} = \left[ \br{cc}QI_r&I_m\er \right].
\eq
and decomposes it too,
+This constructed matrix~$A'$ is then itself decomposed,
\bq{mtxinv:460:77}
A' = Q'R',
\eq
again by GramSchmidtwith the differences that, this time, one
chooses $p=1,2,3,\ldots,r$ during the first~$r$ instances of the
algorithm's step~\ref{mtxinv:461:s20}, and that one skips the
unnecessary step~\ref{mtxinv:461:s50} for all $j \le r$;
+algorithm's step~\ref{mtxinv:461:s20} and that one skips the
+unnecessary step~\ref{mtxinv:461:s50} for all $j \le r$,
on the ground that the earlier GramSchmidt application
of~(\ref{mtxinv:460:74}) has already orthonormalized first~$r$ columns
of~$A'$, which columns, after all, are just~$Q$. The resulting $m
+of~$A'$, which columns, after all, are just~$Q=QI_r$. The resulting $m
\times m$, fullrank square matrix
\bq{mtxinv:460:kernel0}
 Q' = Q + A^\perp H_{r} = \left[ \br{cc}Q&A^\perp\er \right]
+\bq{mtxinv:460:perp0}
+ Q' = QI_r + A^\perp H_{-r} = \left[ \br{cc}QI_r&A^\perp\er \right]
\eq
consists of
\bi
@@ 2508,33 +2643,45 @@ consists of
$mr$ columns on the right that give a complete orthogonal
complement (\S~\ref{mtxinv:450})~$A^\perp$ of~$A$.
\ei
Each column has unit magnitude and conveniently lies orthogonal to every
+Each column has unit magnitude and conveniently lies
+orthogonalindeed, orthonormalto every
other column, left and right.
Equation~(\ref{mtxinv:460:kernel0}) is probably the more useful form,
+\index{GramSchmidt orthogonalcomplement\\formula} % bad break
+\index{orthogonal complement!GramSchmidt formula for}
+Equation~(\ref{mtxinv:460:perp0}) is probably the more useful form,
but the
% bad break
\emph{Gram\linebreak Schmidt
kernel formula} as such,
\bq{mtxinv:460:kernel}
 A^{*K} = A^\perp = Q'H_rI_{mr},
\eq
extracts the rightward columns that express the kernel, not of~$A$, but
of~$A^{*}$. To compute the kernel of a matrix~$B$ by GramSchmidt one
sets $A=B^{*}$ and applies~(\ref{mtxinv:460:74})
through~(\ref{mtxinv:460:kernel}). Refer to~(\ref{mtxinv:450:30}).

In either the form~(\ref{mtxinv:460:kernel0}) or the
form~(\ref{mtxinv:460:kernel}), the GramSchmidt kernel formula does
everything the GaussJordan kernel formula~(\ref{mtxinv:245:kernel})
does and in at least one sense does it better; for, if one wants a
GaussJordan kernel orthonormalized, then one must orthonormalize it as
an extra step, whereas the GramSchmidt kernel comes already
orthonormalized.

Being square, the $m \times m$ matrix~$Q'$ is a unitary matrix, as the
last paragraph of \S~\ref{mtxinv:460.30} has alluded. The unitary
matrix is the subject of \S~\ref{mtxinv:465} that follows.
+orthogonalcomplement formula} as such is that
+\bq{mtxinv:460:perp}
+ A^{*K} = A^\perp = Q'H_rI_{m-r}.
+\eq
+%extracts the rightward columns that express the kernel, not of~$A$, but
+%of~$A^{*}$.
+
+%In either the form~(\ref{}) or the
+%form~(\ref{}), the GramSchmidt kernel formula does
+%everything the GaussJordan kernel formula~(\ref{mtxinv:245:kernel})
+%does and in at least one sense does it better; for, if one wants a
+%GaussJordan kernel orthonormalized, then one must orthonormalize it as
+%an extra step, whereas the GramSchmidt kernel comes already
+%orthonormalized.
+
+\index{GramSchmidt kernel formula}
+\index{kernel!GramSchmidt formula for}
+The writer has not encountered a GramSchmidt kernel
+in the style of~(\ref{mtxinv:245:kernel}) to accompany
+the GramSchmidt orthogonal complement of
+\linebreak % bad break
+(\ref{mtxinv:460:perp})though
+if necessary one could maybe combine~(\ref{mtxinv:460:perp})
+with~(\ref{mtxinv:450:perp}) for the purpose. Instead, normally, as far
+as the writer knows, the GaussJordan~(\ref{mtxinv:245:kernel}) is used.
+Meanwhile however, the matrix~$Q'$ of this subsection is interesting.
+Being square and orthonormal, the $m \times m$ matrix~$Q'$ is a unitary
+matrix. Unitary matrices will be the subject of \S~\ref{mtxinv:465},
+next.
% 
@@ 2621,7 +2768,7 @@ vector's magnitude. To prove it, consid
\[
Q\ve x = \ve b.
\]
Multiplying the system by its own adjoint yields
+Multiplying the system by its own adjoint yields that
\[
\ve x^{*}Q^{*}Q\ve x = \ve b^{*}\ve b.
\]
@@ 2669,5 +2816,5 @@ here; for without the matrix's notation,
the chapter's findings would have lain beyond practical reach. And even
so, the single most interesting agent of matrix arithmetic remains yet
to be treated. This last is the eigenvalue, and it is the subject of
Ch.~\ref{eigen}, next.
+chapter~\ref{eigen}, next.
diff pruN 0.53.201204142/tex/noth.tex 0.56.20180123.12/tex/noth.tex
 0.53.201204142/tex/noth.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/noth.tex 20180114 19:51:29.000000000 +0000
@@ 41,7 +41,9 @@ one or two of its simplest, most broadly
\index{prime number!infinite supply of}
The first primes are evidently $2,3,5,7,\mbox{0xB},\ldots$\,. Is there
a last prime? To show that there is not, suppose that there were. More
+a last prime?
+
+To show that there is no last prime, suppose that there were. More
precisely, suppose that there existed exactly~$N$ primes, with~$N$
finite, letting $p_1,p_2,\ldots,p_N$ represent these primes from least
to greatest. Now consider the product of all the primes,
@@ 57,7 +59,7 @@ that $C+1$ itself is prime, or that $C+1
the series. But the latter is assumed impossible on the ground that the~$p_j$
series includes all primes; and the former is assumed impossible on
the ground that $C+1>C>p_N$, with~$p_N$ the greatest prime. The
contradiction proves false the assumption which gave rise to it. The
+contradiction proves false the assumption that gave rise to it. The
false assumption: that there were a last prime.
Thus there is no last prime. No matter how great a prime number one
@@ 65,7 +67,7 @@ finds, a greater can always be found. T
infinite.%
\footnote{\cite{Stepney}}
\index{Euclid (c.~300~B.C.)}
+\index{Euclid (325--265~B.C.)}
\index{\emph{reductio ad absurdum}}
\index{proof!by contradiction}
\index{contradiction, proof by}
@@ 73,7 +75,7 @@ Attributed to the ancient geometer Eucli
classic example of mathematical \emph{reductio ad absurdum,} or as
usually styled in English, \emph{proof by contradiction.}%
\footnote{%
 \cite[Appendix~1]{Sagan}%
+ \cite[appendix~1]{Sagan}%
\cite[``Reductio ad absurdum,'' 02:36, 28 April 2006]{wikip}
}
@@ 182,7 +184,7 @@ Then,
That $Eq_1 = A_p$ says that~$q_1$ divides~$A_p$. But $A_p 0.
\]
The ratio is \emph{fully reduced} if~$p$ and~$q$ have no prime factors
in common. For instance,~$4/6$ is not fully reduced, whereas~$2/3$ is.
@@ 246,7 +248,7 @@ $\sqrt{n}$ which is not an integer but i
To prove%
\footnote{
A proof somewhat like the one presented here is found in
 \cite[Appendix~1]{Sagan}.
+ \cite[appendix~1]{Sagan}.
}
the last point, suppose that there did
exist a fully reduced
@@ 262,7 +264,7 @@ fully reduced $n=p^2/q^2$ is not an inte
was. The contradiction proves false the assumption which gave rise to
it. Hence there exists no rational, nonintegral $\sqrt{n}$, as was to
be demonstrated. The proof is readily extended to show that any
$x=n^{j/k}$ is irrational if nonintegral, the extension by writing
+$x=n^{j/k}$ is irrational if nonintegral, the extension by writing that
$p^k/q^k=n^j$ then following similar steps as those this paragraph
outlines.
@@ 300,7 +302,7 @@ where $Q_0(z)$ is the quotient and $R_0(
the divisor $A(z)=z\alpha$ has first order, and as
\S~\ref{alggeo:228.20} has observed, firstorder divisors leave
zerothorder, constant remainders $R_0(z) = \rho$. Thus substituting
yields
+yields that
\[
B(z) = (z\alpha)Q_0(z) + \rho.
\]
@@ 314,7 +316,10 @@ But $B(\alpha) = 0$ by assumption, so
\]
Evidently the division leaves no remainder~$\rho$, which is to say that
\emph{$z\alpha$ exactly divides every polynomial $B(z)$ of which
$z=\alpha$ is a root.}
+$z=\alpha$ is a root.}\footnote{%
+ See also~\cite[\S~5.86]{Shilov}, which reaches the same result in
+ nearly the same way.%
+}
Note that if the polynomial $B(z)$ has order~$N$, then the
quotient~$Q(z) = B(z)/(z\alpha)$ has exactly order~$N1$. That is, the
@@ 338,8 +343,8 @@ holds that any polynomial $B(z)$ of orde
where the~$\alpha_k$ are the~$N$ roots of the polynomial.%
\footnote{
Professional mathematicians typically state the theorem in a slightly
 different form. They also prove it in rather a different
 way.\ \cite[Ch.~10, Prob.~74]{Hildebrand}
+ different form. They also usually prove it in rather a different
+ way.\ \cite[chapter~10, Prob.~74]{Hildebrand}\cite[\S~5.85]{Shilov}
}
To prove the theorem, it suffices to show that all polynomials of order
@@ 395,6 +400,19 @@ e^{i\phi}) = 0$. Thus as we were requir
least one root, which observation completes the applied demonstration of
the fundamental theorem of algebra.
+\index{winding}
+(The purist might object that we have failed to prove that some trick
+does not exist whereby the~$N$ loops smoothly collapsed without passing
+through \emph{every} point within. The applicationist might reply that,
+on an applied level, such an objection were less than wholly serious,
+but anyway, here is at least one formal tactic by which one could rule
+out the possibility of a trick: as~$\rho$ shrinks, observing
+$\arg\{B[\rho e^{i\phi}]b_0\}$ as a function of~$\phi$, keep count of
+the net number of times the loops wind counterclockwise about the
+point~$B[0]=b_0$ as, given a particular value of~$\rho$, $\phi$ is let
+to sweep the domain $-2\pi/2 < \phi \le 2\pi/2$. To fill details is
+left as an exercise to the interested reader.)
+
\index{quadratic expression}
\index{cubic expression}
\index{quartic expression}
@@ 403,7 +421,7 @@ The fact that the roots exist is one thi
numerically is another matter. For a quadratic (second order)
polynomial,~(\ref{alggeo:240:quad}) gives the roots. For cubic (third
order) and quartic (fourth order) polynomials, formulas for the roots
are known (see Ch.~\ref{cubic}) though seemingly not so for quintic
+are known (see chapter~\ref{cubic}) though seemingly not so for quintic
(fifth order) and higherorder polynomials;%
\footnote{\label{noth:320:fn20}%
In a celebrated theorem of pure mathematics \cite[``Abel's
@@ 422,6 +440,11 @@ The reverse problem, finding the polynom
much easier: one just multiplies out $\prod_j (z\alpha_j)$, as
in~(\ref{noth:320:50}).
+Incidentally, the reverse problem and its attendant multiplication show
+that an $N$thorder polynomial can have no other roots than the~$N$
+roots $z=\alpha_j$. Reason: the product $\prod_j (z\alpha_j)$ is
+nonzero for all other~$z$.
+
% 
\section{Addition and averages}
@@ 441,26 +464,26 @@ basic ways to calculate averages of them
\index{mason}
Consider the following problem. There are three masons. The
strongest and most experienced of the three, Adam, lays 120
+strongest and most experienced of the three, Adam, lays~60
bricks per hour.%
\footnote{
The figures in the example are in decimal notation.
}
Next is Brian who lays 90. Charles is new; he lays
only 60. Given eight hours, how many bricks can the three men lay?
+Next is Brian who lays~45. Charles is new; he lays
+only~30. Given eight hours, how many bricks can the three men lay?
Answer:
\[
 (8\ \mbox{hours})( 120 + 90 + 60\ \mbox{bricks per hour} )
 = 2160\ \mbox{bricks}.
+ (8\ \mbox{hours})( 60 + 45 + 30\ \mbox{bricks per hour} )
+ = 1080\ \mbox{bricks}.
\]
Now suppose that we are told that Adam can lay a brick every 30 seconds;
Brian, every 40 seconds; Charles, every 60 seconds. How much time do the
three men need to lay 2160 bricks? Answer:
+Now suppose that we are told that Adam can lay a brick every~60 seconds;
+Brian, every~80 seconds; Charles, every~120 seconds. How much time do the
+three men need to lay~1080 bricks? Answer:
\bqb
 \frac{2160\ \mbox{bricks}}{
 \frac{1}{30} +
 \frac{1}{40} +
 \frac{1}{60} \ \mbox{bricks per second}
+ \frac{1080\ \mbox{bricks}}{
+ \left(\frac{1}{60} +
+ \frac{1}{80} +
+ \frac{1}{120}\right) \:\mbox{bricks per second}
} &=& \mbox{28,800 \mbox{seconds}}
\left(\frac{1\ \mbox{hour}}{3600\ \mbox{seconds}}\right)
\\ &=& 8\ \mbox{hours}.
@@ 469,17 +492,17 @@ The two problems are precisely equivalen
terms than the other. The notation used to solve the second is less
elegant, but fortunately there exists a better notation:
\[
 (2160\ \mbox{bricks})(30 \,\\, 40 \,\\, 60\ \mbox{seconds per brick})
+ (1080\ \mbox{bricks})(60 \,\\, 80 \,\\, 120\ \mbox{seconds per brick})
= 8\ \mbox{hours},
\]
where
\[
\frac{1}{
 30 \,\\, 40 \,\\, 60
+ 60 \,\\, 80 \,\\, 120
} =
 \frac{1}{30} +
 \frac{1}{40} +
 \frac{1}{60}.
+ \frac{1}{60} +
+ \frac{1}{80} +
+ \frac{1}{120}.
\]
The operator~$\$ is called the \emph{parallel addition} operator. It
@@ 579,96 +602,71 @@ which means exactly what it appears to m
\index{average}
\index{mean}
\index{productivity}
\index{businessman}
\index{contract}
+%\index{productivity}
+%\index{businessman}
+%\index{contract}
Let us return to the problem of the preceding section. Among the three
masons, what is their average productivity? The answer depends on how you
look at it. On the one hand,
\[
 \frac{120 + 90 + 60\ \mbox{bricks per hour}}{3}
 = 90\ \mbox{bricks per hour}.
+ \frac{\left(60 + 45 + 30\right)\:\mbox{bricks per hour}}{3}
+ = 45\ \mbox{bricks per hour}.
\]
On the other hand,
\[
 \frac{30 + 40 + 60\ \mbox{seconds per brick}}{3}
 = 43{\textstyle\frac{1}{3}}\ \mbox{seconds per brick}.
+ \frac{\left(60 + 80 + 120\right)\:\mbox{seconds per brick}}{3}
+ = 86{\textstyle\frac{2}{3}}\ \mbox{seconds per brick}.
\]
These two figures are not the same. That is,
$1/(43{\textstyle\frac{1}{3}}\ \mbox{seconds per brick}) \neq
90\ \mbox{bricks per hour}$. Yet both figures are valid. Which
figure you choose depends on what you want to calculate. A common
mathematical error among businessmen seems to be to fail to realize that both
averages are possible and that they yield different numbers (if the
businessman quotes in bricks per hour, the productivities average one
way; if in seconds per brick, the other way; yet some businessmen
will never clearly consider the difference). Realizing this, the clever
businessman might negotiate a contract so that the average used
worked to his own advantage.%
\footnote{
 ``And what does the author know about business?'' comes the rejoinder.

 The rejoinder is fair enough. If the author wanted to demonstrate his
 business acumen (or lack thereof) he'd do so elsewhere not here!
 There are a lot of good business books out there and this is not one
 of them.

 The fact remains nevertheless that businessmen sometimes use
 mathematics in peculiar ways, making relatively easy problems harder
 and more mysterious than the problems need to be. If you have ever
 encountered the little monstrosity of an approximation banks (at least
 in the author's country) actually use in place of~(\ref{inttx:240:29})
 to accrue interest and amortize loans, then you have met the
 difficulty.

 Trying to convince businessmen that their math is wrong,
 incidentally, is in the author's experience usually a waste of time.
 Some businessmen are mathematically rather sharpas you presumably
 are if you are in business and are reading these wordsbut as for
 most: when real mathematical ability is needed, that's what they hire
 engineers, architects and the like for. The author is not sure, but
 somehow he doubts that many boards of directors would be willing to
 bet the company on a financial formula containing some
 mysteriouslooking~$e^x$. Business demands other talents.
}

\index{United States}
\index{American}
\index{House of Representatives}
\index{Representatives, House of}
\index{representative}
\index{seat}
\index{apportionment}
\index{Constitution of the United States}
\index{statute}
\index{population}
\index{republic}
+$1/(86{\textstyle\frac{2}{3}}\ \mbox{seconds per brick}) \neq
+45\ \mbox{bricks per hour}$. Yet both figures are valid. Which
+figure you choose depends on what you want to calculate.
+Will the masons lay bricks at the same time in different parts of the
+wall? Then choose the $45\ \mbox{bricks per hour}$.
+Will the masons lay bricks at different times in the same part of the
+wall? Then, especially if the masons have each equal numbers of bricks
+to lay, choose the $86{\textstyle\frac{2}{3}}\ \mbox{seconds per
+brick}$.
+
+%\index{United States}
+%\index{American}
+%\index{House of Representatives}
+%\index{Representatives, House of}
+%\index{representative}
+%\index{seat}
+%\index{apportionment}
+%\index{Constitution of the United States}
+%\index{statute}
+%\index{population}
+%\index{republic}
When it is unclear which of the two averages is more appropriate, a
third average is available, the \emph{geometric mean}
\[
 \left[(120)(90)(60)\right]^{1/3} \ \mbox{bricks per hour}.
+ \left[(60)(45)(30)\right]^{1/3} \ \mbox{bricks per hour}.
\]
The geometric mean does not have the problem either of the two
averages discussed above has. The inverse geometric mean
\[
 \left[(30)(40)(60)\right]^{1/3} \ \mbox{seconds per brick}
+ \left[(60)(80)(120)\right]^{1/3} \ \mbox{seconds per brick}
\]
implies the same average productivity. The mathematically savvy
sometimes prefer the geometric mean over either of the others for this
reason.%
% diagn: review the following footnote briefly once again.%
\footnote{
 The writer, an American, was recently, pleasantly surprised to learn
 that the formula his country's relevant federal statute stipulates to
 implement the Constitutional requirement that representation in the
 country's federal House of Representatives be apportioned by
 population actually, properly always apportions the next available
 seat in the House to the state whose \emph{geometric} mean of
 population per representative before and after apportionment would be
 greatest. Now, admittedly, the Republic does not rise or fall on the
 niceties of averaging techniques; but, nonetheless, some American who
 knew his mathematics was involved in the drafting of that statute!
}
+reason.
+%\footnote{
+% The writer, an American, was recently, pleasantly surprised to learn
+% that the formula his country's relevant federal statute stipulates to
+% implement the Constitutional requirement that representation in the
+% country's federal House of Representatives be apportioned by
+% population actually, properly always apportions the next available
+% seat in the House to the state whose \emph{geometric} mean of
+% population per representative before and after apportionment would be
+% greatest. Now, admittedly, the Republic does not rise or fall on the
+% niceties of averaging techniques; but, nonetheless, some American who
+% knew his mathematics was involved in the drafting of that statute!
+%}
+% On further investigation, it appears that the statute does not detail
+% the method, but the Census Bureau does?? See Title 2, Chapter 1,
+% U.S. Code.
\index{mean!arithmetic}
\index{arithmetic mean}
@@ 677,7 +675,7 @@ reason.%
\index{harmonic mean}
\index{mean!harmonic}
Generally, the \emph{arithmetic, geometric} and \emph{harmonic
means} are defined
+means} are defined to be
\bqa
\mu &\equiv& \frac{\sum_k w_kx_k}{\sum_k w_k} =
\left(\sum_k\\, \frac{1}{w_k}\right) \left(\sum_k w_kx_k\right),
@@ 698,7 +696,7 @@ For two samples weighted equally, these
\eqa
\index{proving backward}
If $a\ge 0$ and $b\ge 0$, then by successive steps,%
+If $a\ge 0$ and $b\ge 0$, then, by successive steps,%
\footnote{\label{noth:420:85}%
The steps are logical enough, but the motivation behind them remains
inscrutable until the reader realizes that the writer originally
diff pruN 0.53.201204142/tex/pref.tex 0.56.20180123.12/tex/pref.tex
 0.53.201204142/tex/pref.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/pref.tex 20180123 03:08:18.000000000 +0000
@@ 1,12 +1,8 @@
% 
% diagn: the preface, recently rewritten, wants careful review.
\chapter{Preface}
\label{preface}
[You are reading a prepublished draft of the book, dated on the title
page.]

I never meant to write this book. It emerged unheralded, unexpectedly.
The book began in 1983 when a highschool classmate challenged me to
@@ 17,29 +13,21 @@ kernel the notes grew over time, until f
the notes might make the material for the book you hold.
The book is neither a tutorial on the one hand nor a bald reference on
the other. It is a study reference, in the tradition of, for instance,
Kernighan's and Ritchie's \emph{The C Programming Language}~\cite{KR}.
In this book, you can look up some particular result directly, or you
can begin on page one and readwith toil and commensurate
profitstraight through to the end of the last chapter.

The reader who has come to look up a particular result will, I trust,
already have turned ahead to it, so let me here address the other
reader, who means to begin on page one. The book as a whole surveys the
general mathematical methods common to engineering, architecture,
chemistry and physics. The book thus is a marshal or guide. It
concisely arrays and ambitiously reprises the mathematics the practicing
scientist or engineer will likely once have met but may imperfectly
recall, deriving the mathematics it reprises, filling gaps in one's
knowledge while extending one's mathematical reach. For the
prospective, collegebound scientist or engineer, to the extent to which
study of the book proper is augmented by reflection, review, exercise
and application, the book offers a concrete means to earn an enduring
academic advantage.
+the other. The book is rather a \emph{study reference.} In this book,
+you can look up some particular result directly, or you can begin on
+page one and readwith toil and commensurate profitstraight through
+to the end of the last chapter.
+
+The book as a whole surveys the general mathematical methods common to
+engineering, architecture, chemistry and physics. As such, the book
+serves as a marshal or guide. It concisely arrays and ambitiously
+reprises the mathematics of the scientist and the engineer, deriving the
+mathematics it reprises, filling gaps in one's knowledge while extending
+one's mathematical reach.
Its focus on derivations is what principally distinguishes this book
from the few others%
\footnote{
+\footnote{%
Other books of the class include
\cite{Courant/Hilbert}\cite{Jeffreys/Jeffreys}\cite{Arfken/Weber}.
}
@@ 51,31 +39,30 @@ applications.
\subsubsection*{Plan}
Following its introduction in Ch.~\ref{intro} the book comes in three
+Following its introduction in chapter~\ref{intro} the book comes in three
parts. The first part begins with a brief review of classical algebra
and geometry and develops thence the \emph{calculus} of a single complex
variable, this calculus being the axle as it were about which higher
mathematics turns. The second part constructs the initially oppressive
but broadly useful mathematics of \emph{matrices} and \emph{vectors,}
without which so many modern applications (to the fresh incredulity of
each generation of college students) remain analytically
intractablethe jewel of this second part being the \emph{eigenvalue}
of Ch.~\ref{eigen}. The third and final part, the most interesting but
also the most advanced, introduces the the mathematics of the
\emph{Fourier transform, probability} and the \emph{wave
equation}each of which is enhanced by the use of \emph{special
functions,} the third part's unifying theme.
+mathematics turns. The second part laboriously constructs the broadly
+useful mathematics of \emph{matrices} and \emph{vectors,} without which
+so many modern applications (to the fresh incredulity of each generation
+of college students) remain analytically intractablethe jewel of this
+second part being the \emph{eigenvalue} of chapter~\ref{eigen}. The third
+and final part, the most interesting but also the most advanced,
+introduces the mathematics of the \emph{Fourier transform, probability}
+and the \emph{wave equation}each of which is enhanced by the use of
+\emph{special functions,} the third part's unifying theme.
Thus, the book's overall plan, though extensive enough to take several
hundred pages to execute, is straightforward enough to describe in a
single sentence. The plan is to derive as many mathematical
results, useful to engineers and their brethren, as possible in a
coherent train, recording and presenting the derivations together in an
orderly manner in a single volume. What constitutes ``useful'' or
``orderly'' is a matter of perspective and judgment, of course. My own
peculiar heterogeneous background in military service, building
construction, electrical engineering, electromagnetic analysis and
Debian development, my nativity, residence and citizenship in the United
+single sentence. The plan is to derive as many mathematical results,
+useful to scientists, engineers and the like, as possible in a coherent
+train, recording and presenting the derivations together in an orderly
+manner in a single volume. What constitutes ``useful'' or ``orderly''
+is a matter of perspective and judgment, of course. My own peculiar,
+heterogeneous background in military service, building construction,
+electrical engineering, electromagnetic analysis and software
+development, my nativity, residence and citizenship in the United
States, undoubtedly bias the selection and presentation to some degree.
How other authors go about writing their books, I do not know, but I
suppose that what is true for me is true for many of them also: we begin
@@ 92,39 +79,54 @@ one conspicuous respect which, I think,
book employs hexadecimal numerals.
Why not decimal only? There is nothing wrong with decimal numerals as
such. I am for them, retaining especially a partial regard for the
stately grandeur of the decimal numerals \textsc{mdclxvi} of the famous
Roman style. Decimal numerals are well in history and anthropology (man
+such. I am for them, whether in the Roman or the Arabic style.
+Decimal numerals are well in history and anthropology (man
has ten fingers), finance and accounting (dollars, cents, pounds,
shillings, pence: the base hardly matters), law and engineering (the
physical units are arbitrary anyway); but they are merely serviceable in
mathematical theory, never aesthetic.
%
Custom is not always defaced, but sometimes adorned, by the respectful
attendance of a prudent discrimination. It is in this spirit alone that
hexadecimal numerals are given place here.
+mathematical theory, never aesthetic. Custom is not always defaced, but
+sometimes adorned, by the respectful attendance of a prudent
+discrimination. It is in this spirit alone that hexadecimal numerals
+are given place here.
Admittedly, one might judge the last to be more excuse than cause. Yet,
though a dreary train of sophists down the years, impatient of
experience, eager to innovate, has indisputably abused such
causesin ways which the mature reader of a certain cast of mind will
find all too familiarsuch causes would hardly merit abuse did they
not sometimes hide a latent measure of justice. It is to the justice,
or at least to the aesthetic, rather than to the sophistry that I affect
to appeal here.
+experience, eager to innovate, has indisputably abused such causesin
+ways which the mature reader of a certain cast of mind will find all too
+familiarsuch causes would hardly merit abuse did they not sometimes
+hide a latent measure of justice. It is to the justice, or at least to
+the aesthetic, rather than to the sophistry that I affect to appeal
+here.
There unfortunately really is no gradual way to bridge the gap to
hexadecimal (shifting to base eleven, thence to twelve, etc., is no
use). If one wishes to reach hexadecimal ground then one must leap.
Twenty years of keeping my own private notes in hex have persuaded me
+Thirty years of keeping my own private notes in hex have persuaded me
that the leap justifies the risk. In other matters, by contrast, the
book leaps seldom. The book in general walks a tolerably conventional
applied mathematical line.
+\subsubsection*{Audience}
+
+Besides those who have opened this book only to look up some particular
+result (a numerous and honorable clan, but likely not reading this
+preface), the book's readers arrive in two principal corps. First come
+the engineers, architects, chemists and physicists who seek ideas
+toward, and logic to back, their analytical modeling of physical
+systems. Second come those ambitious students of calculus that want a
+broader, demand a deeper, and venture a terser treatment of the
+discipline than calculus textbooks usually afford.
+
+There are also some others. In a third corps come the economist and his
+brethren, who may find the book a little long on physics and,
+comparatively, slightly short on statistics, but still edifying perhaps.
+Whether a few students of pure mathematics make a fourth corps, hunting
+sketches to elaborate, remains to be seen.
+
\subsubsection*{Publication}
The book belongs to the emerging tradition of opensource software
where at the time of this writing it fills a void. Nevertheless it is a
+The book belongs to the emerging tradition of opensource software where
+at the time of this writing it fills a void. Nevertheless it is a
\emph{book,} not a program. Lore among opensource developers holds
that open development inherently leads to superior work. Well, maybe.
Often it does in fact. Personally with regard to my own work, I should
@@ 132,50 +134,32 @@ rather not make too many claims. It wou
professional editing and formal peer review, neither of which the book
enjoys, had substantial value. On the other hand, it does not do to
despise the \emph{amateur} (literally, one who does for the love of it:
not such a bad motive, after all\footnote{The expression is derived
from an observation I seem to recall George~F.\ Will making.}) on
principle, eitherunless one would on the same principle despise a
Socrates, a Washington, an Einstein or a Debian Developer~\cite{Debian}.
Open source has a spirit to it which leads readers to be far more
+not such a bad motive, after all\footnote{The expression is derived from
+an observation I seem to recall George~F.\ Will making.}) on principle,
+eitherunless one would on the same principle despise a Socrates, a
+Washington, an Einstein or a Debian Developer.%
+\footnote{%
+ \cite{Debian}
+}
+Open source has a spirit to it which leads readers to be more
generous with their feedback than ever could be the case with a
traditional, proprietary book. Such readers, among whom a surprising
concentration of talent and expertise are found, enrich the work freely.
This has value, too.
The book's opensource publication implies that it can neither go out of
print nor grow hard to find. You can indeed copy and distribute the
book yourself if you wish. Most readers naturally will be satisfied
merely to read the book, but the point is that other writers can refer
\emph{their} readers hither without undue fear of imposition, thus
saving scarce pages in the other writers' own books and articles, pages
otherwise devoted to mathematical appendices rather omitted. After all,
a reference one's reader can conveniently follow differs practically
from a reference one's reader can follow with difficulty if at all.

%How good or useful the book might be is not for me to say, but if you
%spend a few minutes flipping through the pages then the small likelihood
%that \emph{you} will choose to toss it summarily on the junkpile will
%not disturb me.

%I should answer a natural question regarding the book's opensource
%publication (if you don't have such a question, you can skip to the next
%section). My principal motive to let the book be so published is
%straightforward enough: to avoid relinquishing to a publisher direction
%of the book's distribution and marketing. A good publisher, of course,
%must know far more about distribution and marketing than I ever will;
%yet, insofar as such knowledge descends from experience in distributing
%and marketing similar books, insofar as \emph{this} book is
%insufficiently similar, insofar as the publisher assumes the financial
%risk of letting an unknown author deviate from established
%formulawell, the real test lies in this question: if I stood in the
%publisher's shoes, would I risk publishing this book? My answer would
%probably be \emph{no.} The reasons are plain enough. Whatever the
%book's technical and literary merits may be, the risk to the publisher
%is just too great. The risk to its author is greater, for the
%probability that a publisher who cannot even classify the book would
%market it properly seems small. No, it cannot work that way for this
%book. This book shall have to find its market without a conventional
%publisher's help.
+print nor grow hard to find. If desired you could, if expedient you
+should, copy, archive and distribute the book yourself, without further
+permission than the book's license already grants%
+\footnote{%
+ \cite{GPL}
+}%
+though as a courtesy to your own readers and to this writer you might
+publish the book's electronic address, \emph{derivations.org,} along
+with the book. Naturally, merely to read the book will quite satisfy
+most readers, but the point is this: the authors of other works can
+refer \emph{their} readers hither without thereby setting their readers
+off on a quest for some obscure or costly quarry.
\subsubsection*{Edition}
@@ 199,41 +183,164 @@ generally sets in italic type. Within t
by the number of the section that states it.
The book subjoins an alphabetical index as a standard convenience. Even
so, the canny reader will avoid using the index (of this and most other
+so, the canny reader will avoid using the index (of this and other
books), which alone of the book's pages is not to be regarded as a
proper part of the book. Such a reader will tend rather to consult the
book's table of contents which is a proper part.
The book includes a bibliography listing works I have referred to while
writing. This is as it should be. Mathematics however by nature
promotes queer bibliographies, for its methods and truths are
established by derivation rather than authority. Much of the book
consists of common mathematical knowledge or of proofs I have worked out
with my own pencil from various ideas gleanedwho knows from
where?over the years. The latter proofs are perhaps original or
semioriginal from my personal point of view but it is unlikely that
many if any of them are truly new. To the initiated, the mathematics
itself often tends to suggest the form of the proof: if to me, then
surely also to others who came before; and even where a proof is new the
idea proven probably is not.
+The book includes a bibliography listing works to which I have referred
+while writing. However, mathematics by its nature promotes queer
+bibliographies, for its methods and truths are established by derivation
+rather than by authority. A mathematical bibliography may thus less appeal
+to the works it lists than simply afford those works due credit.
+
+Of course, not every point in the book is backed by a bibliographic
+citation of any kind. Some of the book consists of common mathematical
+knowledge or even of proofs I have worked out with my own pencil from
+various ideas gleaned---who knows from where?---over the years. The
+latter proofs are perhaps original or semioriginal from my personal
+point of view but it is unlikely that many if any of them are truly new.
+To the initiated, the mathematics itself often tends to suggest the form
+of the proof: if to me, then surely also to others who came before; and
+even where a proof is new the idea proven is probably not.
+
+\subsubsection*{Philosophy}
+
+Speaking of ideas and proofs: an idea is one thing, but what precisely
+constitutes a \emph{proof?}
+
+%\index{Kant, Immanuel (17241804)}
+%\index{Weierstrass, Karl Wilhelm Theodor (18151897)}
+%\index{Hilbert, David (18621943)}
+%\index{Lewis, C.~S.\ (18981963)}
+Modern pure mathematics tends to make one shy of the question. To me at
+least, a mathematical proof remains what an earlier era once
+unsuspectingly held it to be: it remains a morally convincing appeal to
+man's faculty of logic, geometry and number (though I study not to focus
+the reader's attention, unprofitably to the book's purpose, on any such
+metadefinition). Neither in this book nor elsewhere do I wish to
+deconstruct, reconstruct, lay bare, replace, supersede or explain away
+the faculty so named. Indeed, towering figures like Kant, Weierstrass
+and Hilbert notwithstanding, C.~S.\ Lewis (1898--1963) speaks for me
+when he writes:
+\begin{quote}
+ You cannot go on ``seeing through'' things for ever. The whole point
+ of seeing through something is to see something through it. It is
+ good that the window should be transparent, because the street or
+ garden beyond it is opaque. How if you saw through the garden too?
+ It is no use trying to ``see through'' first principles. If you see
+ through everything, then everything is transparent. But a wholly
+ transparent world is an invisible world. To ``see through'' all
+ things is the same as not to see.~\cite[chapter~3]{Lewis:abolition}
+\end{quote}
+Such are my sympathies.
+
+%\index{Plato (428348~B.C.)}
+%\index{Weyl, Hermann (18851955)}
+%\index{G\"odel, Kurt Friedrich (19061978)}
+%\index{Frege, Friedrich Ludwig Gottlob (18481925)}
+%\index{Courant, Richard (18881972)}
+Would the Kantian era in which we live countenance it, the book should
+sooner merely have let pure mathematics' abstract foundations lie
+undisturbed. However, since the era probably will not countenance it,
+chapter~\ref{intro} engages the question briefly but soberly, after
+which other chapters touch the question as necessary. When
+philosophically put to it, the book tends less to follow Kant,
+Weierstrass or Hilbert in spirit than Plato, Frege, Weyl and G\"odel%
+\footnote{%
+ Readers who know the subject well may note the omission of the
+ important name of Richard Dedekind (1831--1916) from these two lists.
+ However, in which of the two would you name Dedekind? It is no easy
+ questionnor is it a question this book will tackle. As respectable
+ as Dedekind is, this book does not especially follow him,
+ anyway.~\cite{Sieg/Schlimm}\cite{Toader}
+
+ One could further mention several other respectable names---%
+ Georg Cantor's (1845--1918),
+ Bertrand Russell's (1872--1970) and
+ L.~E.~J.\ Brouwer's (1881--1966),
+ for instance, after the name of the great
+ Carl Friedrich Gauss (1777--1855)---%
+ and one could bring the lists generally more up to date, but we
+ will leave the matter there.
+
+ To date the names listed:
+ Plato (428--348~B.C.);
+ Immanuel Kant (1724--1804);
+ Karl Weierstrass (1815--1897);
+ Gottlob Frege (1848--1925);
+ David Hilbert (1862--1943);
+ Hermann Weyl (1885--1955);
+ Richard Courant (1888--1972);
+ Kurt G\"odel (1906--1978).
+}
+(though there remains the peculiar matter of ``the Courant--Hilbert--Shilov
+perspective,'' of which more will be said). That is in spirit. In
+method, with little further apology, the book follows the timehonored
+practice of the working scientist and engineer.
+
+% diagn: review this new subsubsection
+\subsubsection*{Reliance}
+
+I hope that the book harbors no more errors than other books of the
+kind do. I hope that the book harbors fewer. Having revised the
+book's manuscript (or the notes from which the manuscript is
+drawn) over a period of~30 years, I believe that the book's results are
+correct in the main.
+
+Nevertheless, the book gives reasons the reader can evaluate. The book
+details steps the reader can check. The book illuminates patterns the
+reader can study. The book teaches principles the reader can absorb.
+To look up a result in the book without evaluating, checking, studying
+or absorbing might not always be an unreasonable risk to run when stakes
+are small and time is short, but application of the book's results must
+in every case remain the responsibility of the applicationist.
+
+\subsubsection*{A curiosity}
+
+To my surprise, the later parts of the book, which treat more advanced
+topics, have been the easier to write. By contrast, to explain the mere
+basics of singlevariable calculus and complex numbers in the book's
+part~\ref{part10} has been unexpectedly harder. An extra effort has
+been required there (which however I have not omitted to make).
+
+The matter need not concern the reader, yet it is curious, is it not?
+One never quite knows which parts of a book will prove the more
+difficult to write until one has sat down to write them.
\subsubsection*{Acknowledgements}
+%\subsection*{Completion}
+%
+%For~35 years, 1983 to 2018---and even for the last~12 of those years, from
+%2006 when I first granted the public access to the book---I cautiously labeled
+%the book a prepublished draft. From Jan.~15, 2018, however, I have given the
+%book as a proper edition. Version 1.1 (there was no 1.0) was released on that
+%date.
+%
+%Applied mathematics is unbounded. Therefore, in some sense, \emph{Derivations
+%of Applied Mathematics} may never be complete. I still mean to add to the book
+%from time to time. Nevertheless, by~2018, the book---having been more
+%thoroughly revised, more carefully checked, and more widely read than a
+%commercially published proprietary book will typically have been at \emph{its}
+%initial publication---seemed complete enough for its intended reader's purpose.
+%The book's part~III still wants more sections and chapters, if I ever find time
+%to write them, but such a want should no longer bar the present publication.
Among the bibliography's entries stands a
reference~\cite{Brownconversation} to my doctoral adviser G.S.~Brown,
though the book's narrative seldom explicitly invokes the reference.
Prof.~Brown had nothing directly to do with the book's development, for
a substantial bulk of the manuscript, or of the notes underlying it, had
been drafted years before I had ever heard of Prof.~Brown or he of me,
and my work under him did not regard the book in any case. However, the
ways in which a good doctoral adviser influences his student are both
complex and deep. Prof.~Brown's style and insight touch the book in
many places and in many ways, usually too implicitly coherently to cite.
+\subsubsection*{Acknowledgements}
Steady encouragement from my wife and children contribute to the book
in ways only an author can properly appreciate.
+The style and insight of Prof.\ G.~S.\ Brown~\cite{Brownconversation}
+and of Mr.\ D.~E.\ Davis~\cite{DEDavisconversation} implicitly touch
+the book in many places and in many ways.
+
+Steady encouragement from my wife and children contribute to the book in
+ways only an author can properly appreciate.
+
+% diagn: review this paragraph
+For a preface to state that the book's shortcomings remain the author's
+own seems \emph{de rigueur.} The statement remains true, too, though,
+so I am glad for this chance to state it.
More and much earlier than to Prof.~Brown or to my wife and children,
+More and much earlier than to Prof.~Brown, to Mr.~Davis,
+or to my wife and children,
the book owes a debt to my mother and, separately, to my father, without
either of whom the book would never have come to be. Admittedly, any
author of any book might say as much in a certain respect, but it is no
@@ 242,14 +349,16 @@ author's expressions of filial piety. N
respect that I lay the matter here. My mother taught me at her own hand
most of the mathematics I ever learned as a child, patiently building a
foundation that cannot but be said to undergird the whole book today.
% diagn: Check the next sentence for the final draft.
More recently, my mother has edited tracts of the book's manuscript.
My father generously financed much of my formal education butmore
than thisone would have had to grow up with my brother and me in my
+My father generously financed much of my formal education butmore than
+thisone would have had to grow up with my brother and me in my
father's home to appreciate the grand sweep of the man's curiosity, the
general depth of his knowledge, the profound wisdom of his practicality
and the enduring example of his love of excellence.
+% The next, if uncommented, is an especially bad break.
+% bad break
+%\pagebreak
+
May the book deserve such a heritage.
\nopagebreak
diff pruN 0.53.201204142/tex/prob.tex 0.56.20180123.12/tex/prob.tex
 0.53.201204142/tex/prob.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/prob.tex 20180123 23:56:29.000000000 +0000
@@ 2,15 +2,12 @@
\label{prob}
\index{probability}
[This chapter is still quite a rough draft.]

\index{statistics}
Of all mathematical fields of study, none may be so counterintuitive
yet, paradoxically, so widely applied as that of probability, whether
+and yet so widely applied as that of probability---whether
as \emph{probability} in the technical term's conventional, restricted
meaning or as probability in its expanded or inverted guise as
\emph{statistics.}%
\footnote{
+\emph{statistics.}\footnote{%
The nomenclature is slightly unfortunate. Were statistics called
``inferred probability'' or ``probabilistic estimation'' the name
would suggest something like the right taxonomy. Actually, the
@@ 21,11 +18,11 @@ meaning or as probability in its expande
opposed to the statistics (plural noun) mean and standard deviation of
\S~\ref{prob:070}as such lies mostly beyond this book's scope, but
the chapter will have at least a little to say about it in
 \S~\ref{prob:200}.
+ \S~\ref{prob:200}.%
}
Man's mind, untrained, seems somehow to rebel against the concept.
Sustained reflection on the concept however gradually reveals to the
mind a fascinating mathematical landscape.
+The untrained mind seems to rebel against the concept.
+Nevertheless, sustained reflection upon the concept gradually reveals
+a fascinating mathematical landscape.
\index{thumb}
\index{uncertainty}
@@ 38,17 +35,18 @@ degree of uncertainty in the length. Pr
\emph{statistics} is the mathematics which produces, analyzes and
interprets such quantities.
\index{American male}
+\index{U.S.\ male}
\index{height}
More obviously statistical is a report that the average,
say,~25yearold American male is~$70 \pm 3$ inches tall, inferred from
actual measurements of some number $N>1$ of~25yearold American males.
+More obviously statistical is a report that, say, the
+average~25-year-old U.S.\ male is~$69 \pm 3$ inches tall, inferred from
+actual measurements\footnote{\cite[Tables~6 and~12]{CDC}}
+of some number $N>1$ of~25-year-old U.S.\ males.
Deep mathematics underlie such a report, for the report implies among
other things that a little over
twothirds$(1/\sqrt{2\pi})\int_{1}^{1}\exp(\tau^2/2)\,d\tau \approx
\mbox{0x0.AEC5}$, to be preciseof a typical, randomly chosen sample
of~25yearold American males ought to be found to have heights
between~67 and~73 inches.
+of~25-year-old U.S.\ males ought to be found to have heights
+between~66 and~72 inches.
\index{game of chance}
\index{card}
@@ 61,27 +59,30 @@ observation but on a teleological assump
assumption of symmetry such as that no face of a die or card from a deck
ought to turn up more often than another. Entertaining so plausible an
assumption, if you should draw three cards at random from a
standard~52card deck (let us use decimal notation rather than
hexadecimal in this paragraph, since neither you nor I have ever heard
of a~0x34card deck), the deck comprising four cards each of thirteen
+standard 52-card\footnote{Decimal notation is used here.} deck,
+the deck comprising four cards each of thirteen
ranks, then there would be some finite probabilitywhich is
$(3/51)(2/50) = 1/425$that the three cards you draw would share the
+$(3/51)(2/50) = 1/425$---that the three cards drawn would share the
same rank (why?). If I should however shuffle the deck, draw three
cards off the top, and look at the three cards without showing them to
you, all before inviting you to draw three, then the probability that
your three would share the same rank were again $1/425$ (why?). On the
other hand, if before you drew I let you peek at my three hidden cards,
and you saw that they were ace, queen and ten, this knowledge alone must
slightly lower your estimation of the probability that your three would
subsequently share the same rank as one another to $(40/49)(3/48)(2/47)
+other hand, if before you drew I let you \emph{peek} at my three hidden cards,
+and you saw that my three hidden cards
+were ace, queen and ten, then this knowledge alone must
+slightly lower your estimate of the probability that your three would
+subsequently share the same rank to $(40/49)(3/48)(2/47)
+ (9/49)(2/48)(1/47) \approx 1/428$ (again, why?).
\index{trial}
+\index{expectation}
That the probability should be $1/425$ suggests that one would draw
three of the same rank once in~425 tries. However, were I to
shuffle~425 decks and you to draw three cards from each, then you
\emph{might} draw three of the same rank from two, three or four decks,
or from none at all, though very unlikely from twenty decksso what
+three of the same rank once in~425 tries. That is, were I to
+shuffle~425 decks and you to draw three cards from each, then
+for you to draw three of the same rank from just one of the~425
+decks would be expected.
+Nevertheless, despite expectations, you \emph{might} draw three of the
+same rank from two, three or four decks, or from none at all---so what
does a probability of $1/425$ really mean? The answer is that it
means something like this: were I to shuffle~425 \emph{million} decks
then you would draw three of the same rank from very nearly~1.0 million
@@ 92,17 +93,40 @@ exactly upon $1/425$ as the number of tr
See also \S~\ref{drvtv:220}.
Other than by this brief introduction, the book you are reading is not
+% It should not really be necessary to tie \emph{a~priori,} but in a
+% text like this, if it breaks after the "a," the italic "a" looks too
+% much like a variable $a$.
+\index{climate}
+\index{die}
+\index{probability!empirical}
+\index{probability!\emph{a priori}}
+\index{empirical probability}
+\index{\emph{a priori} probability}
+If unsure, consider this. Suppose that during six days in an unfamiliar
+climate, in a place you had never before visited, it rained twice. Then
+suppose that during six throws of a six-sided die, a single pip came up
+twice. What would you conclude about the climate? What would you
+conclude about the die? See, these are different cases.
+
+Regarding the climate, the best one can do might be to suppose
+\emph{empirically} that, on average, it rained two days out of every
+six; whereas one should probably assume \emph{a~priori} that, on average,
+a single pip were to come up one throw out of every six. For the die,
+one would regard the two-throw observation to represent but a random
+fluctuation.
+
+Cases of either kind can be quantitatively analyzed. This chapter
+mostly (though not solely) analyzes cases of the \emph{a~priori}
+kind, cases like that of the die.
+
+Other than by the brief introduction you are reading, this book is not
well placed to offer a gentle tutorial in probabilistic thought.%
\footnote{
 The late R.W. Hamming's fine book~\cite{Hamming} ably fills such a
 role.
+\footnote{%
+ R.~W.\ Hamming's \cite{Hamming} ably fills such a role.%
}
What it does offer, in the form of the present chapter, is the discovery
and derivation of the essential mathematical functions of probability
theory (including in \S~\ref{prob:100} the derivation of one critical
result undergraduate statistics textbooks usually state but,
understandably, omit to prove), plus a brief investigation of these
+and derivation of some of the essential mathematical functions of
+probability theory, plus a brief investigation of these
functions' principal properties and typical use.
% 
@@ 141,7 +165,7 @@ where the event of interest is that the
fall%
\footnote{
This sentence and the rest of the section condense somewhat lengthy
 tracts of an introductory collegiate statistics text
+ tracts of an introductory collegiate statistics textbook
like~\cite{Walpole/Myers}\cite{Alder/Roessler}\cite{Lindgren}\cite{Rosenkrantz},
among others. If the sentence and section make little sense to you
then so likely will the rest of the chapter, but any statistics text
@@ 149,13 +173,13 @@ fall%
a mathematical gap than a conceptual one. Or, if defiant, you can
stay here and work through the concepts on your own.
}
in the interval%
+within the interval%
\footnote{
We might as well have expressed the interval $a < x < b$ as $a \le x
\le b$ or even as $a \le x < b$, except that such notational niceties
would distract from the point the notation means to convey. The
notation in this case is not really interested in the bounding points
 themselves. If \emph{we} are interested the bounding points, as for
+ themselves. If \emph{we} are interested in the bounding points, as for
example we would be if $f(x) = \delta(x)$ and $a=0$, then we can
always write in the style of $P_{(0^{})b}$, $P_{(0^{+})b}$,
$P_{(a\ep)(b+\ep)}$, $P_{(a+\ep)(b\ep)}$ or the like. We can even be
@@ 179,10 +203,10 @@ The \emph{quantile} $F^{1}(\cdot)$ inve
\bq{prob:quantile}
F^{-1}[F(x)] = x,
\eq
generally calculatable by a NewtonRaphson iteration~(\ref{drvtv:NR}) if
+generally calculable by a Newton-Raphson iteration~(\ref{drvtv:NR}) if
by no other means.
\index{probability!that both of two, independent events will occur}
+\index{probability!that both of two independent events will occur}
\index{probability density function!of a sum of random variables}
\index{convolution}
It is easy enough to see that the product
@@ 190,14 +214,17 @@ It is easy enough to see that the produc
P = P_1P_2
\eq
of two probabilities composes the single probability that not just one
but both of two, independent events will occur. Harder to see, but just
+but both of two independent events will occur. Harder to see, but just
as important, is that the convolution
\bq{prob:050:40}
f(x) = f_1(x) \ast f_2(x)
\eq
of two probability density functions composes the single probability
density function of the sum of two random variables $x=x_1+x_2$, where,
per Table~\ref{fouri:110:tbl41},
+density function of the sum of two random variables
+\bq{prob:050:41}
+ x=x_1+x_2,
+\eq
+where, per Table~\ref{fouri:110:tbl41},
\[
f_2(x) \ast f_1(x) = f_1(x) \ast f_2(x) \equiv
\int_{\infty}^{\infty} f_1\left(\frac{x}{2}\tau\right)
@@ 211,30 +238,28 @@ That is, if you think about it in a cert
\lim_{\ep \ra 0^{+}} \sum_{k=\infty}^{\infty}
\bigg\{
\bigg[
 \int_{(k1/2)\ep}^{(k+1/2)\ep} f_1(x) \,dx
 \bigg] \bigg[
 \int_{ak\ep}^{bk\ep} f_2(x) \,dx
+ \int_{ak\ep}^{bk\ep} f_1(x) \,dx
+ \bigg]
+ \bigg[
+ \int_{(k1/2)\ep}^{(k+1/2)\ep} f_2(x) \,dx
\bigg]
\bigg\}
\\&=&
\lim_{\ep \ra 0^{+}} \sum_{k=\infty}^{\infty}
\bigg\{
\bigg[
 \ep f_1(k\ep)
 \bigg] \bigg[
 \int_{a}^{b} f_2(xk\ep) \,dx
+ \int_{a}^{b} f_1(xk\ep) \,dx
+ \bigg]
+ \bigg[
+ \ep f_2(k\ep)
\bigg]
\bigg\}
\\&=&
 \int_{\infty}^{\infty} f_1(\tau) \bigg[
 \int_{a}^{b} f_2(x\tau) \,dx
 \bigg] \,d\tau
+ \int_{\infty}^{\infty} \bigg[
+ \int_{a}^{b} f_1(x\tau) \,dx
+ \bigg] f_2(\tau) \,d\tau
\\&=&
 \int_{a}^{b} \left[ \int_{\infty}^{\infty} f_1(\tau) f_2(x\tau) \,d\tau \right] \,dx
 \\&=&
 \int_{a}^{b} \left[ \int_{\infty}^{\infty}
 f_1\left(\frac x2 + \tau\right)
 f_2\left(\frac x2  \tau\right) \,d\tau \right] \,dx
+ \int_{a}^{b} \left[ \int_{\infty}^{\infty} f_1(x\tau) f_2(\tau) \,d\tau \right] \,dx
\\&=&
\int_{a}^{b} \left[ \int_{\infty}^{\infty}
f_1\left(\frac x2  \tau\right)
@@ 274,14 +299,27 @@ and standard deviation are \emph{statist
\footnote{
Other statistics than the mean and standard deviation are possible,
but these two are the most important ones and are the two this book
 treats.
+ will treat.
}
When the chapter's introduction proposed that the average~25yearold
American male were~$70 \pm 3$ inches tall, it was saying that his height
+When for example the chapter's introduction proposed that the average~25-year-old
+U.S.\ male were~$69 \pm 3$ inches tall, it was saying that his height
could quantitatively be modeled as a random variable drawn from a
distribution whose statistics are $\mu = 70$ inches and $\sigma =
+distribution whose statistics are $\mu = 69$ inches and $\sigma =
3$ inches.
+The first line of~(\ref{prob:stat}), defining~$\mu$, might seem obvious
+enough, but one might ask why $\sigma$ had not instead been defined to
+be $\langle | x - \langle x \rangle | \rangle$. Would that not have
+been more obvious? One answer is\footnote{The writer does not know the
+original, historical answer.} that, yes, it might have been more obvious
+but it would not have been analytic (\S\S~\ref{alggeo:225.3}
+and~\ref{taylor:320}). Another answer is that one likes to regard long
+excursions from the mean more seriously than short ones. A third answer
+is that the second line of~(\ref{prob:stat}) comports with the elegant
+mathematics of least squares and Moore-Penrose (\S~\ref{mtxinv:320}).
+Whatever the answer,~(\ref{prob:stat}) is the definition conventionally
+used.
+
% 
\section{The sum of random variables}
@@ 393,7 +431,7 @@ If this is rightas indeed it isthe
variables together not only adds the means of the variables' respective
distributions according to~(\ref{prob:070:20}) but also, according
to~(\ref{prob:070:30}), adds the squares of the standard deviations. It
follows directly that, if~$N$ independent instances $x_1, x_2, \ldots,
+follows inductively that, if~$N$ independent instances $x_1, x_2, \ldots,
x_N$ of a random variable are drawn from the same distribution
$f_o(x_k)$, the distribution's statistics being~$\mu_o$ and~$\sigma_o$,
then the statistics of their sum $x = \sum_{k=1}^N x_k = x_1 + x_2 +
@@ 426,7 +464,7 @@ is itself a random variable obeying the
\bq{prob:080:10}
f(x) = \left. \frac{f_o(x_o)}{\left|dg/dx_o\right|} \right|_{x_o = g^{-1}(x)}.
\eq
Another, suaver way to write the same thing is
+Another, suaver way to write the same thing is as that
\bq{prob:080:11}
f(x) \left|dx\right| = f_o(x_o) \left|dx_o\right|.
\eq
@@ 435,7 +473,12 @@ perspective, but can in any case be supp
\[
\int_{a}^{b} f_o(x_o) \,dx_o
 = \left| \int_{g(a)}^{g(b)} f_o(x_o) \frac{dx_o}{dx} \,dx \right|
 = \int_{g(a)}^{g(b)} f_o(x_o) \left|\frac{dx_o}{dg}\right| \,dx.
+ = \int_{g(a)}^{g(b)} f_o(x_o) \left|\frac{dx_o}{dg}\right| \,dx
+\]
+since, on the other hand
+\[
+ \int_{a}^{b} f_o(x_o) \,dx_o
+ = \int_{g(a)}^{g(b)} f(x) \,dx.
\]
\index{random variable!scaling of}
@@ 443,24 +486,25 @@ perspective, but can in any case be supp
\index{probability density function!flattening of}
\index{flattening}
One of the most frequently useful transformations is the simple
\[
+\bq{prob:080:48}
x \equiv g(x_o) \equiv \alpha x_o,
 \ \ \mbox{$\Im(\alpha) = 0$, $\Re(\alpha) > 0$.}
\]
For this, evidently $dg/dx_o = \alpha$, so according
+ \ \ \mbox{$\Im(\alpha) = 0$.}
+\eq
+For this, evidently $dg/dx_o = \alpha$ or $dx = \alpha dx_o$, so according
to~(\ref{prob:080:10}) or~(\ref{prob:080:11})
\bq{prob:080:49}
f(x) = \frac{1}{\left|\alpha\right|} f_o\left(\frac{x}{\alpha}\right).
\eq
If $\mu_o=0$ and $\sigma_o=1$, then $\mu=0$ and, in train
of~(\ref{prob:stat}),
+If $\mu_o=0$ and $\sigma_o=1$, then $\mu=0$ and,
+applying~(\ref{prob:080:11}) in train of~(\ref{prob:stat}),
\[
\sigma^2
= \int_{\infty}^{\infty} f(x) x^2 \,dx
= \int_{\infty}^{\infty} f_o(x_o) (\alpha x_o)^2 \,dx_o
= \alpha^2;
\]
whereby $\sigma=\alpha$ and we can rewrite the transformed PDF as
+whereby $\sigma=\alpha$ and, if $\alpha > 0$, one can rewrite the
+transformed PDF as
\bq{prob:080:50}
f(x) = \frac{1}{\sigma} f_o\left(\frac{x}{\sigma}\right)\ %
\mbox{and $\mu = 0$, if $\mu_o = 0$ and $\sigma_o = 1$.}
@@ 468,7 +512,7 @@ whereby $\sigma=\alpha$ and we can rewri
Assuming null mean,~(\ref{prob:080:50}) states that the act of scaling a
random variable flattens out the variable's distribution and scales its
standard deviation, all by the same factorwhich, naturally, is what
one would expect it to do.
+one would expect such an act to do.
% 
@@ 478,11 +522,11 @@ one would expect it to do.
\index{distribution!normal}
\index{$\Omega$ as the Gaussian pulse}
Combining the ideas of \S\S~\ref{prob:074} and~\ref{prob:080} leads us
now to ask whether a distribution does not exist for which, when
+Combining the ideas of \S\S~\ref{prob:074} and~\ref{prob:080} can lead one
+to ask whether a zeromean distribution does not exist for which, when
independent random variables drawn from it are added together, \emph{the
sum obeys the same distribution,} only the standard deviations
differing. More precisely, we should like to identify a distribution
+differing. More precisely, the ideas can lead one to seek a distribution
\[
\mbox{$f_o(x_o)$: $\mu_o = 0$, $\sigma_o = 1$;}
\]
@@ 498,7 +542,7 @@ as~(\ref{prob:080:50}) suggests, then
\[
x = x_1 + x_2
\]
by construction is a random variable drawn from the distribution
+is by construction a random variable drawn from the distribution
\[
f(x) = \frac{1}{\sigma} f_o\left(\frac{x}{\sigma}\right),
\]
@@ 514,42 +558,60 @@ Gaussian pulse $\Omega(x_o)$ of \S\S~\re
recommends itself. This works. The distribution $f_o(x_o) =
\Omega(x_o)$ meets our criterion.
+\subsection{Proof}
+\label{prob:100.10}
+\index{normal distribution!proof of}
+\index{distribution!normal, proof of}
+
To prove that the distribution $f_o(x_o) = \Omega(x_o)$ meets our
criterion we shall first have to show that it is indeed a distribution
+criterion we shall have first to show that it is indeed a distribution
according to~(\ref{prob:050:10}). Especially, we shall have to
demonstrate that
\[
\int_{\infty}^{\infty} \Omega(x_o) \,dx_o = 1.
\]
Fortunately, as it happens, we have already demonstrated this fact as
% bad break
\linebreak
(\ref{fouri:130:60}); so, since $\Omega(x_o)$ evidently meets the
other demands of~(\ref{prob:050:10}), it apparently is a proper
distribution. That $\mu_o = 0$ for $\Omega(x_o)$ is obvious by symmetry.
That $\sigma_o = 1$ is shown by
+Fortunately as it happens we have already demonstrated this fact
+in an earlier chapter, while working on Fourier transforms, as
+(\ref{fouri:130:60}). The function $\Omega(x_o)$ had interested us
+during the earlier chapter because it is an analytic function that
+autotransforms, so now in this chapter we observe that, since $\Omega(x_o)$
+evidently meets the other demands
+of~(\ref{prob:050:10}), $\Omega(x_o)$ is apparently indeed also a proper
+distribution, whatever its other properties might be.
+That $\mu_o = 0$ for $\Omega(x_o)$ is obvious by symmetry. That
+$\sigma_o = 1$ is shown by
\bqb
 \int_{\infty}^{\infty} \Omega(x_o) x_o^2 \,dx_o
 &=&
+ \lefteqn{
+ \sigma^2 \equiv
+ \int_{\infty}^{\infty} \Omega(x_o) x_o^2 \,dx_o
+ } \qquad\quad &&
+ \\ &=&
\frac{1}{\sqrt{2\pi}}
\int_{\infty}^{\infty} \exp\left(\frac{x_o^2}{2}\right) x_o^2 \,dx_o
\\&=&
+ \frac{1}{\sqrt{2\pi}}
+ \int_{\infty}^{\infty}
+ \left\{\bigg[\mbox{$\ds x_o$}\bigg]\bigg[\mbox{$\ds x_o\exp\left(\frac{x_o^2}{2}\right)dx_o$}\bigg]\right\}
+ \\&=&
\frac{1}{\sqrt{2\pi}}
 \int_{\infty}^{\infty} x_o \,d\left[\exp\left(\frac{x_o^2}{2}\right)\right]
+ \int_{x_o=\infty}^{\infty} x_o \,d\left[\exp\left(\frac{x_o^2}{2}\right)\right]
\\&=&
 \left.\frac{x_o \exp\left(x_o^2/2\right)}{\sqrt{2\pi}}\right_{\infty}^{\infty}
 \\&&\ \ \ \ \mbox{}
+ \makebox[3pt][c]{}\left.\frac{x_o \exp\left(x_o^2/2\right)}{\sqrt{2\pi}}\right_{\infty}^{\infty}
+\frac{1}{\sqrt{2\pi}}\int_{\infty}^{\infty}
 \exp\left(\frac{x_o^2}{2}\right) \,dx_o,
+ \exp\left(\frac{x_o^2}{2}\right) \,dx_o
\\&=&
0 + \int_{\infty}^{\infty} \Omega(x_o) \,dx_o,
\eqb
from which again according to~(\ref{fouri:130:60})
+the integration via the byparts method of \S~\ref{inttx:230},
+the result according to
+% bad break
+(\ref{fouri:130:60}) that
\bq{prob:100:10}
 \int_{\infty}^{\infty} \Omega(x_o) x_o^2 \,dx_o = 1
+ \sigma^2 \equiv \int_{\infty}^{\infty} \Omega(x_o) x_o^2 \,dx_o = 1,
\eq
as was to be shown. Now having justified the assertions that
$\Omega(x_o)$ is a proper distribution and that its statistics are $\mu_o
+implying that $\sigma=1$ as was to be shown. Now having justified the
+assertions that $\Omega(x_o)$ is a proper distribution and that its
+statistics are $\mu_o
= 0$ and $\sigma_o = 1$, all that remains to be proved
per~(\ref{prob:050:40}) is that
\bq{prob:100:20}
@@ 562,9 +624,9 @@ per~(\ref{prob:050:40}) is that
\end{split}
\eq
which is to prove that the sum of Gaussian random variables is itself
Gaussian. We will prove it in the Fourier domain of Ch.~\ref{fouri} as
follows. According to Tables~\ref{fouri:110:tbl20}
and~\ref{fouri:120:tbl20}, and to~(\ref{prob:normdist}),
+Gaussian. We will prove it in the Fourier domain of chapter~\ref{fouri} as
+follows. According to Tables~\ref{fouri:110:tbl20}, \ref{fouri:110:tbl41}
+and~\ref{fouri:120:tbl25}, and to~(\ref{prob:normdist}),
\bqb
\lefteqn{
\left[\frac{1}{\sigma_1}\Omega\left(\frac{x_o}{\sigma_1}\right)\right]
@@ 584,34 +646,36 @@ and~\ref{fouri:120:tbl20}, and to~(\ref{
\right\}
\\&=&
\mathcal{F}^{1}\left\{
 \frac{\exp\left[\sigma_1^2 x_o^2/2\right]
 \exp\left[\sigma_2^2 x_o^2/2\right]}{\sqrt{2\pi}}
+ \frac 1{\sqrt{2\pi}} \exp\left[-\frac{\sigma_1^2 x_o^2}2\right]
+ \exp\left[-\frac{\sigma_2^2 x_o^2}2\right]
\right\}
\\&=&
\mathcal{F}^{1}\left\{
 \frac{\exp\left[\left(\sigma_1^2+\sigma_2^2\right) x_o^2/2\right]}
 {\sqrt{2\pi}}
+ \frac 1{\sqrt{2\pi}} \exp\left[-\frac{\left(\sigma_1^2+\sigma_2^2\right) x_o^2}2\right]
\right\}
\\&=&
\mathcal{F}^{1}\left\{
\Omega\left[\left(\sqrt{\sigma_1^2+\sigma_2^2}\right)x_o\right]
\right\}
\\&=&
 \frac{1}{\sqrt{\sigma_1^2+\sigma_2^2}}
+ \frac{1}{\sqrt{\sigma_1^2+\sigma_2^2}}\,
\Omega\left(\frac{x_o}{\sqrt{\sigma_1^2+\sigma_2^2}}\right),
\eqb
the last line of which is~(\ref{prob:100:20}) in other notation, thus
completing the proof.
+\subsection{Plots and remarks}
+\label{prob:100.40}
+
\index{Gauss, Carl Friedrich (17771855)}
\index{Gaussian pulse}
\index{pulse, Gaussian}
+\index{pulse!Gaussian}
\index{normal distribution}
\index{distribution!normal}
\index{Gaussian distribution}
\index{distribution!Gaussian}
\index{bell curve}
In the Fourier context of Ch.~\ref{fouri} one usually names
+In the Fourier context of chapter~\ref{fouri} one usually names
$\Omega(\cdot)$ the \emph{Gaussian pulse,} as we have seen. The function
$\Omega(\cdot)$ turns out to be even more prominent in probability theory
than in Fourier theory, however, and in a probabilistic context it
@@ 704,12 +768,12 @@ function~(\ref{prob:CDF}).
\index{normal distribution!cumulative distribution function of}
\index{cumulative distribution function!of the normal distribution}
\index{cumulative distribution function!numerical calculation of}
+\index{cumulative normal distribution function}
Regarding the cumulative normal distribution function, one way to
calculate it numerically is to integrate the normal distribution's
Taylor series term by term. As it happens, \S~\ref{inttx:450} has
worked a very similar integral as an example, so this section will not
repeat the details, but the result
is
+worked a similar integral as an example, so this section will not
+repeat the details, but the result is that
\bqa
F_\Omega(x_o) = \int_{-\infty}^{x_o} \Omega(\tau) \,d\tau
&=&
@@ 728,8 +792,88 @@ correctis practical only for small an
\index{distribution!default}
The normal distribution tends to be the default distribution in applied
mathematics. When one lacks a reason to do otherwise, one models a
random quantity as a normally distributed random variable. See
\S~\ref{prob:300} for the reason.
+random quantity as a normally distributed random variable.
+Section~\ref{prob:300} tells more.
+
+\subsection{Motive}
+\label{prob:100.60}
+\index{motive!to posit the normal distribution}
+\index{normal distribution!motive to posit}
+
+\index{position}
+Equation~(\ref{prob:normdist}) seems almost unfair to posit. Once the
+equation has been posited, the proof follows, the proof validating the
+position; so the logic is valid, but why posit the equation in the
+first place?\footnote{This subsection is optional reading for the benefit of
+the curious. You can skip it without burdening the rest of the book.}
+
+One answer is that the equation~(\ref{prob:normdist}) is not really all
+that obscure. To study expressions that resemble $\exp(x^2/2)$ for
+their own sakes is neither unreasonable nor especially unlikely. Some
+mathematician or other must probably, eventually have thought to try
+such an expression against the logic of \S~\ref{prob:100.10}. Once he
+had tried it and had shown us his result, we would know to posit it.
+
+The last paragraph's answer is actually a pretty good answer. We should not be
+embarrassed to give it. Much of mathematics goes that way, after all.
+
+Nevertheless, an alternate answer is known. Suppose that~$N$ coins are tossed
+%\footnote{%
+% Will the tossed coin remain familiar to future generations of
+% readers? Probably so, but the expression has the ring of approaching
+% obsolescence even as it is written. Nonetheless, ``tossed coin''
+% remains current at this writing.
+%
+% A certain degree of obsolescence is a thing readers of old books must
+% always encounter, isn't it? Whether or not you would still speak of a
+% tossed coin in your day, the image it conveys ought to be clear enough.
+%
+% There is a regrettable tendency among too many readers of any
+% generation to smile callowly at the antiquities of old authors. Such
+% readers are apt to forget that, during the old authors' generation,
+% \emph{they had yet older authors to smile at, too.} Why, during the old
+% author's generation, they even had older authors more recent than,
+% say, the scribes of the Sumerian clay tablets, hard though that may be
+% to believe. Maybe they even had a few older authors who, for all their
+% failure to anticipate the discoveries, trends and fashions of
+% centuries to come, are smarter and wiser than you.
+%
+% Old authors are hardly a new thing.
+%
+% If you think your generation exceptional, well, not only are you likely
+% mistaken but worse, you imagine even your \emph{mistake} to be new!
+% No, such conceit is as old as the hills. Now, between the two of us,
+% across the gulf of years, who is smiling at whom, eh? I never knew
+% you, but see the arrow of time: it points both ways.%
+%}
+and that~$2m$ is the number of heads in excess of the number of tails (for
+example, if~6 heads and~2 tails, then $2m=6-2=4$ and $N=6+2=8$).
+According to the combinatorics of \S~\ref{drvtv:220},
+\[
+ f(m) = \cmb{N}{[N+2m]/2} = \frac{N!/[(N-2m)/2]!}{[(N+2m)/2]!}
+\]
+computes the probability that~$m$ will have a given value.
+
+Since we are merely motivating, we need not be precise, so approximately,
+\bqb
+ \lefteqn{\frac{d}{dm}\ln f(m) = \frac{df/dm}{f(m)}} &&
+ \\&\approx& \frac{f(m+1)-f(m-1)}{2f(m)}
+ \\&\approx& \frac 1 2 \left[\frac{f(m+1)}{f(m)} - \frac{f(m-1)}{f(m)}\right]
+ \\&\approx& \frac 1 2 \left[\frac{(N-2m)/2}{(N+2m+2)/2} - \frac{(N+2m)/2}{(N-2m+2)/2}\right]
+ \\&\approx& \frac 1 2 \left[\frac{1-2m/N}{1+(2m+2)/N} - \frac{1+2m/N}{1-(2m+2)/N}\right]
+ \\&\approx& \frac 1 2 \left[\frac{(-8m-4)/N}{1-[(2m+2)/N]^2}\right]
+ \approx \frac 1 2 \left[\frac{-8m-4}N\right]\left[1+\left(\frac{2m+2}N\right)^2\right]
+ \\&\approx& \frac 1 2 \left[\frac{-8m}N\right] = -\frac{4m}{N}.
+\eqb
+Changing $x \la m$ and $\alpha \la 4/N$,
+\[
+ \frac{d}{dx}\ln f(x) = \frac{df/dx}{f(x)} \approx -\alpha x.
+\]
+A function that does this is
+\[
+ f(x) \approx C\exp(-\alpha x^2/2),
+\]
+which motivates~(\ref{prob:normdist}).
% 
@@ 740,13 +884,14 @@ random quantity as a normally distribute
\index{estimation of statistics}
\index{sample}
+\index{instance}
Suppose that several, concrete instances of a random
variablecollectively called a \emph{sample}were drawn from a
distribution $f(x)$ and presented to you, but that you were not told the
shape of $f(x)$. Could you infer the shape?
+variable---the instances collectively called a \emph{sample}---were
+drawn from a distribution $f(x)$ and presented to you, but that you were
+not told the shape of $f(x)$. Could you infer the shape?
The answer is that you could infer the shape with passable accuracy
provided that the number~$N$ of samples were large. Typically however
+provided that the number~$N$ of instances were large. Typically however
one will be prepared to make some assumption about the shape such as
that
\bq{prob:200:20}
@@ 756,111 +901,204 @@ which is to assume that~$x$ were normall
statistics~$\mu$ and~$\sigma$. The problem then becomes to infer the
statistics from the sample.
+\subsection{Inference of the mean}
+\label{prob:200.20}
\index{mean!inference of}
In the absence of additional information, one can hardly hardly suppose
much about the mean other than that
+
+In the absence of additional information, one can hardly suppose
+much regarding the mean other than that
\bq{prob:200:mean}
\mu \approx \frac{1}{N}\sum_k x_k.
\eq
One infers the mean to be the average of the instances one has observed.
One might think to infer the standard deviation in much the same way
except that to calculate the standard deviation directly according
to~(\ref{prob:stat}) would implicate our imperfect
+One infers the mean to be the average of the instances observed.
+
+\subsection{An imputed ensemble}
+\label{prob:200.25}
+\index{ensemble}
+\index{imputed ensemble}
+
+One might na\"ively think to infer a standard deviation in much the same
+way as \S~\ref{prob:200.20} has inferred a mean, except that to
+calculate the standard deviation directly according to~(\ref{prob:stat})
+would implicate our imperfect
estimate~(\ref{prob:200:mean}) of the mean. If we wish to estimate the
standard deviation accurately from the sample then we shall have to
proceed more carefully than that.
\index{standard deviation!inference of}
It will simplify the standarddeviational analysis to consider the
shifted random variable
\[
 u_k = x_k  \mu_{\mr{true}}
\]
instead of~$x_k$ directly, where~$\mu_{\mr{true}}$ is not the estimated
mean of~(\ref{prob:200:mean}) but the true, unknown mean of the hidden
distribution $f(x)$. The distribution of~$u$ then is
$f(u+\mu_{\mr{true}})$, a distribution which by construction has zero
mean. (Naturally, we do not knowwe shall never knowthe actual
value of~$\mu_{\mr{true}}$, but this does not prevent us from
representing~$\mu_{\mr{true}}$ symbolically during analysis.) We shall
presently find helpful the identities
+Section~\ref{prob:200.30} will estimate the standard deviation after
+the subsection you are reading has prepared the ground on which to do
+it. To prepare the ground, let us now define the shifted random variable
+\[
+ u \equiv x - \mu_{\mr{true}}
+\]
+in lieu of the random variable~$x$, where~$\mu_{\mr{true}}$ \emph{is not
+the estimated mean} of~(\ref{prob:200:mean}) but is the true, unknown
+mean of the hidden distribution $f(x)$---such that an instance~$u_k$ of
+the random variable~$u$ is in no way independent of, but is rather
+wholly dependent on, the corresponding instance~$x_k$ of the random
+variable~$x$; but also, paradoxically, such that the exact value
+of~$u_k$ remains unknown even if the exact value of~$x_k$ is known.
+And why does the exact value of~$u_k$ remain unknown? It remains
+unknown because the separation~$\mu_{\mr{true}}$ (which from the present
+perspective is no random variable but a fixed number) between~$u_k$
+and~$x_k$ remains unknown. At any rate, the distribution of~$u$ is
+\[
+ f_u(u) \equiv f(u+\mu_{\mr{true}}),
+\]
+a distribution which by construction \emph{is known to have zero true
+mean,}
+\[
+ \langle u \rangle = 0,
+\]
+even though the standard deviation~$\sigma_\mr{true}$ the two
+distributions $f(x)$ and $f_u(u)$ share remains unknown.
+%(Naturally, we do not knowwe shall never knowthe actual
+%value of the true mean~$\mu_{\mr{true}}$ of the hidden distribution
+%$f[\cdot]$, any more than we shall ever know the actual
+%shape of $f[\cdot]$ and $f_u[\cdot]$,
+%but this lack of knowledge will not prevent us from
+%representing~$\mu_{\mr{true}}$ symbolically during analysis.)
+
+Statistical reasoning is tricky, isn't it? No? Quite straightforward,
+you say? Good, let us continue.
+
+Regarding not any particular sample of~$N$ instances but a conceptually
+infinite \emph{ensemble} of samples, each sample consisting of~$N$ instances,
+two identities the standarddeviational analysis of \S~\ref{prob:200.30}
+will be able to use are that
\[
\begin{split}
 \left\langle \sum_k u_k^2 \right\rangle &= N\sigma^2, \\
 \left\langle {\sum_k}^2 u_k \right\rangle &= N\sigma^2,
+ \left\langle \sum_k u_k^2 \right\rangle &= N\sigma_{\mr{true}}^2, \\
+ \left\langle {\sum_k}^2 u_k \right\rangle &= N\sigma_{\mr{true}}^2,
\end{split}
\]
the first of which is merely a statement of the leftward part of
(\ref{prob:stat})'s second line with respect to the unknown distribution
$f(u+\mu_{\mr{true}})$ whose mean $\langle u \rangle$ is null by
construction, the second of which considers the sum $\sum_k u_k$ as a
random variable whose mean again is null but whose standard
+where~$\sigma_{\mr{true}}$ is (as the notation suggests and as an
+earlier paragraph has observed) the
+true, unknown standard deviation of the hidden distribution $f(x)$ and
+thus also of the shifted distribution $f_u(u)$.
+The first of the two identities is merely a statement of the leftward part of
+(\ref{prob:stat})'s second line with respect to the distribution
+$f_u(u)$ whose mean $\langle u \rangle = 0$ is, as we said, known to be
+zero despite that the distribution itself remains unknown.
+The second of the two identities
+considers the sum $\sum_k u_k$ itself as a
+random variable whose mean again is zero but whose standard
deviation~$\sigma_\Sigma$ according to~(\ref{prob:070:30}) is such that
$\sigma_\Sigma^2 = N\sigma_{\mr{true}}^2$.
With the foregoing definition and identities in hand, let us construct
from the available sample the quantity
+\index{observation}
+Admittedly, one might wonder how we can
+speak sensibly of an ensemble when no
+concrete ensemble is to be observed. Observation, after all, sees
+only the one sample of~$N$ instances. However, we have assumed that a
+hidden distribution $f(x)$ exists and that the several instances~$x_k$,
+which \emph{are} observed, have been drawn from it. Our assumption
+might be \emph{wrong,} of course---in general this is very difficult to
+judge---but we have assumed it and the assumption has
+consequences. Among the consequences is that $f(x)$ possesses
+statistics~$\mu_{\mr{true}}$ and~$\sigma_{\mr{true}}$.
+We do not know---we shall never know---the right values of these
+statistics; but our assumption implies that they do exist and do have
+values, values one can and should write symbols to represent.
+
+Section~\ref{prob:200.30} will employ the symbols, next.
+
+\subsection{Inference of the standard deviation}
+\label{prob:200.30}
+\index{standard deviation!inference of}
+
+With the definitions and identities of \S~\ref{prob:200.25} in hand, let
+us construct from the available sample the quantity
\bqb
\left(\sigma'\right)^2
\equiv
\frac 1 N \sum_k\left(
 x_k  \frac 1 N \sum_k x_k
+ x_k - \frac 1 N \sum_\ell x_\ell
\right)^2,
\eqb
which would tend to approach~$\sigma_{\mr{true}}^2$ as~$N$ grew
arbitrarily large but which, unlike~$\sigma_{\mr{true}}$, is a quantity
we can actually compute for any $N > 1$. By successive steps,
+modeled on~(\ref{prob:stat}). Evidently,
+\[
+ \lim_{N\ra\infty} \sigma' = \sigma_{\mr{true}}.
+\]
+However, unlike the~$\sigma_{\mr{true}}$, the~$\sigma'$ is a quantity we
+can actually compute from an observed sample. Let the sample consist of
+$N > 1$ instances. By successive steps,
\bqb
\left(\sigma'\right)^2
&=&
\frac 1 N \sum_k\left(
 [u_k+\mu_{\mr{true}}]  \frac 1 N \sum_k [u_k+\mu_{\mr{true}}]
+ [u_k+\mu_{\mr{true}}] - \frac 1 N \sum_\ell [u_\ell+\mu_{\mr{true}}]
\right)^2
\\&=&
\frac 1 N \sum_k\left(
 u_k  \frac 1 N \sum_k u_k
+ u_k - \frac 1 N \sum_\ell u_\ell
\right)^2
\\&=& \frac 1 N
\sum_k\left(
 u_k^2  \frac 2 N u_k \sum_k u_k
 + \frac 1{N^2} {\sum_k}^2 u_k
+ u_k^2 - \frac 2 N u_k \sum_\ell u_\ell
+ + \frac 1{N^2} {\sum_\ell}^2 u_\ell
\right)
\\&=&
\frac 1 N \sum_k u_k^2
+ -\frac 2{N^2} {\sum_k} u_k {\sum_\ell} u_\ell
+ +\frac 1{N^2} {\sum_\ell}^2 u_\ell
+ \\&=&
+ \frac 1 N \sum_k u_k^2
-\frac 2{N^2} {\sum_k}^2 u_k
+\frac 1{N^2} {\sum_k}^2 u_k
\\&=&
\frac 1 N \sum_k u_k^2
-\frac 1{N^2} {\sum_k}^2 u_k,
\eqb
the expected value of which is
+the expected value of which over a conceptually infinite
+ensemble of samples (each sample consistingas explained by
+\S~\ref{prob:200.25}of an equal number~$N>1$ of
+instances) is
\[
\left\langle \left(\sigma'\right)^2 \right\rangle
=
\frac 1 N \left\langle \sum_k u_k^2 \right\rangle
 \frac 1{N^2} \left\langle {\sum_k}^2 u_k \right\rangle,
+ -\frac 1{N^2} \left\langle {\sum_k}^2 u_k \right\rangle.
\]
Applying the identities of the last paragraph,
+Applying the identities of \S~\ref{prob:200.25},
\[
\left\langle \left(\sigma'\right)^2 \right\rangle
 = \sigma^2  \frac{\sigma^2}{N}
 = \frac{N1}{N} \sigma^2,
+ = \sigma_{\mr{true}}^2 - \frac{\sigma_{\mr{true}}^2}{N}
+ = \frac{N-1}{N} \sigma_{\mr{true}}^2,
\]
from which
\[
 \sigma^2 = \frac{N}{N1} \left\langle \left(\sigma'\right)^2 \right\rangle.
+ \sigma_{\mr{true}}^2 = \frac{N}{N-1} \left\langle \left(\sigma'\right)^2 \right\rangle.
\]
Because the expected value $\langle(\sigma')\rangle$ is not a quantity
+Because the expectation $\langle(\sigma')\rangle$ is not a quantity
whose value we know, we can only suppose that $\langle(\sigma')^2\rangle
\approx \sigma^2$, whereby
+\approx (\sigma')^2$, whereby
\[
 \sigma^2 \approx \frac{N}{N1} \left(\sigma'\right)^2,
+ \sigma_{\mr{true}}^2 \approx \frac{N}{N-1} \left(\sigma'\right)^2.
\]
and, substituting the definition of $(\sigma')^2$ into the last equation,
+Substituting the definition of $(\sigma')^2$ into the last equation and
+changing symbols $\sigma \la \sigma_{\mr{true}}$, we have that
\bq{prob:200:stdev}
\sigma^2 \approx
\frac 1{N-1} \sum_k\left(
 x_k  \frac 1 N \sum_k x_k
+ x_k - \frac 1 N \sum_\ell x_\ell
+ \right)^2.
+\eq
+This~$\sigma^2$ is apparently a little greater (though, provided
+that~$N$ is sufficiently large, not much greater) than a na\"ive
+assumption that~$\sigma$ equalled~$\sigma'$ would have supposed.
+
+Notice according to~(\ref{prob:200:stdev}) that~$\sigma$,
+unlike~$\sigma'$, infers a standard deviation only when the sample
+includes at least two instances! Indeed,~$\sigma$ is sensible to
+do so, for the one case in which a na\"ive analysis
+were right would be when the true mean $\mu_{\mr{true}}$ were for some
+reason \emph{a~priori} exactly known, leaving only the standard
+deviation to be inferred. In such a case,
+\bq{prob:200:37}
+ \sigma^2 \approx
+ \frac 1 N \sum_k\left(
+ x_k - \mu_{\mr{true}}
\right)^2.
\eq
@@ 868,35 +1106,44 @@ and, substituting the definition of $(\s
\index{sample statistic}
The estimates~(\ref{prob:200:mean}) and~(\ref{prob:200:stdev}) are known
as \emph{sample statistics.} They are the statistics one imputes to an
unknown distribution based on the incomplete information of $N > 1$
samples.
+unknown distribution based on the incomplete information a sample of $N
+> 1$ instances affords.
+
+\subsection{Correlation and its inference}
+\label{prob:200.40}
+\index{correlation}
+\index{correlation!inference of}
\index{independence}
\index{dependence}
\index{correlation}
\index{correlation coefficient}
\index{Pfufnik, Gorbag}
This chapter generally assumes independent random variables when it
speaks of probability. In statistical work however one must sometimes
handle correlated quantities like the height and weight of a~25yearold
American malefor, obviously, if I point to some 25yearold over
there and say, ``That's Pfufnik. The average is~160 pounds, but he
+\index{Pfufnik, Gorbag~J.}
+The chapter you are reading has made some assumptions, not all of which
+it has explicitly stated, or at any rate not all of which it has fully
+developed. One assumption the chapter has made is that instances of its
+random variables have been \emph{independent.}
+In statistical work however one must sometimes handle \emph{correlated}
+quantities like the height and weight of a~25-year-old
+U.S.\ male---for, obviously, if I point to some 25-year-old over
+there and say, ``That's Pfufnik. The average is~187 pounds, but he
weighs~250!'' then your estimate of his probable height will change,
because height and weight are not independent but correlated. The
conventional statistical measure%
\footnote{\cite[\S~9.9]{Walpole/Myers}\cite[eqns.~126 and~1214]{Alder/Roessler}}
of the correlation of a series $(x_k,y_k)$ of pairs of data, such as
+of the correlation of a sample of~$N$ pairs $(x_k,y_k)$ of data, such as the
$([\mbox{height}]_k,[\mbox{weight}]_k)$ of the example, is the
\emph{correlation coefficient}
\bq{prob:070:60}
r \equiv \frac{
\sum_k (x_k-\mu_x) (y_k-\mu_y)
}{
 \sqrt{\sum_k (x_k\mu_x)^2 \sum_k (x_y\mu_y)^2}
+ \sqrt{\sum_k (x_k-\mu_x)^2 \sum_k (y_k-\mu_y)^2}
},
\eq
a unitless quantity whose value is~$\pm 1$, indicating perfect
correlation, when $y_k = x_k$ or even when $y_k = a_1 x_k + a_0$; but
+a unitless quantity whose value is~$\pm 1$the~$\pm 1$ indicating perfect
+% bad break
+cor\-re\-la\-tion---when
+$y_k = x_k$ or even $y_k = a_1 x_k + a_0$; but
whose value should be near zero when the paired data are unrelated. See
Fig.~\ref{mtxinv:320:fig1} for another example of the kind of paired
data in whose correlation one might be interested: in the figure, the
@@ 904,13 +1151,38 @@ correlation would be~$+1$ if the points
(Beware that the conventional correlation coefficient of
eqn.~\ref{prob:070:60} can overstate the relationship between paired
data when~$N$ is small. Consider for instance that $r=\pm 1$ always
when $N=2$. The coefficient as given nevertheless is conventional.)
+when $N=2$. The coefficient as given is nevertheless conventional.)
+
+That $r = \pm 1$ when
+$
+ y_k = a_1 x_k + a_0
+$
+is seen by observing that
+\bqb
+ y_k-\mu_y
+ &=&
+ (a_1) \left[ x_k-\frac{\mu_y-a_0}{a_1} \right]
+ =
+ (a_1) \left[ x_k-\frac{\left(\sum_\ell y_\ell\right)/N-a_0}{a_1} \right]
+ \\&=&
+ (a_1) \left[ x_k-\frac{\left(\sum_\ell a_1x_\ell\right)/N}{a_1} \right]
+ =
+ (a_1) \left[ x_k-\mu_x \right],
+\eqb
+which, when substituted into~(\ref{prob:070:60}), yields the stipulated
+result.
+
+\subsection{Remarks}
+\label{prob:200.90}
If further elaborated, the mathematics of statistics rapidly grow much
+If further elaborated, the mathematics of statistics rapidly grows much
more complicated. The book will not pursue the matter further but will
mention that the kinds of questions that arise tend to involve the
statistics of the statistics themselves, treating the statistics as
random variables. Such questions confound two, separate uncertainties:
+mention that the kinds of questions that arise can, among others, involve the
+statistics of the statistics themselves, treating the \emph{statistics} as
+random variables. Section~\ref{prob:200.30} has done this just a bit.
+The book will avoid doing more of it.
+
+Such questions confound two, separate uncertainties:
the uncertainty inherent by definition~(\ref{prob:050:10}) in a random
variable even were the variable's distribution precisely known; and the
uncertain knowledge of the distribution.%
@@ 918,16 +1190,40 @@ uncertain knowledge of the distribution.
The subtle mathematical implications of this far exceed the scope of
the present book but are developed to one degree or another in
numerous collegiate statistics texts of
 which~\cite{Walpole/Myers}\cite{Alder/Roessler}\cite{Lindgren}\cite{Rosenkrantz}
+ which \cite{Bulmer}\cite{Walpole/Myers}\cite{Alder/Roessler}\cite{Lindgren}\cite{Rosenkrantz}
are representative examples.
}
Fortunately, if $N \gg 1$, one can usually tear the two uncertainties
+Fortunately, if $N \gg 1$, then one can usually tear the two uncertainties
from one another without undue violence to accuracy, pretending that one
knew the unknown~$\mu$ and~$\sigma$ to be exactly the
+knew the unknown statistics~$\mu$ and~$\sigma$ to have exactly the
values~(\ref{prob:200:mean}) and~(\ref{prob:200:stdev}) respectively
calculate, supposing that the distribution were the
+calculate for them, supposing that the distribution were the
normal~(\ref{prob:200:20}), and modeling on this basis.
+\index{Student, a statistician}
+\index{Bulmer, M.~G.}
+\index{Fisher, R.~A.\ (18901962)}
+Unfortunately, that $N \gg 1$ is not so for many samples of practical
+interest. As the biologist M.~G.\ Bulmer recounts,
+\begin{quote}
+ `Student' had found, however, that in his practical work for Guinness'
+ brewery he was often forced to deal with samples far too small for the
+ customary large sample approximations to be applicable. It was
+ gradually {r}{e}{a}{l}{i}{s}{e}{d} after the publication of his paper,
+ and of R.~A.\ Fisher's papers on other problems in small sample
+ theory, that if the sample were large enough the answer to any
+ question one might ask would be obvious, and that it was only in the
+ case of small and moderatesized samples that any statistical problem
+ arose.~\cite[chapter~9]{Bulmer}
+\end{quote}
+Notwithstanding, the book you are reading will delve no further into the
+matter, but will turn attention back to probabilistic topics. It is
+worth noting before we turn, however, that it took a pair of
+biologists`Student' and Fisherto broaden the relevant mathematics.
+Professional mathematicians might never have discovered the right
+direction in which to explore on their own. See
+\S\S~\ref{conclu:athwart} and~\ref{conclu:klein}.
+
% 
\section{The random walk and its consequences}
@@ 935,9 +1231,10 @@ normal~(\ref{prob:200:20}), and modeling
\index{random walk}
\index{walk, random}
This section brings overview, insight and practical perspective. It
also analyzes the simple but often encountered statistics of a series of
allornothingtype attempts.
+\index{attempt, allornothing}
+\index{allornothing attempt}
+This section analyzes the simple but oftencountered statistics of a series
+of allornothing attempts.
\subsection{The random walk}
\label{prob:300.10}
@@ 945,14 +1242,14 @@ allornothingtype attempts.
\index{walk, random}
\index{Sands, Matthew}
\index{Feynman, Richard~P. (19181988)}
+\index{Feynman, Richard~P.\ (19181988)}
\index{lecture}
\index{coin}
Matthew Sands gave a famous lecture~\cite[\S\S~I:6]{Feynman}, on
probability, on behalf of Richard~P. Feynman at Caltech in the fall
of~1961. The lecture is a classic and is recommended to every reader of
this chapter who can conveniently lay hands on a printed
copyrecommended among other reasons because it lends needed context
+Matthew Sands gave a famous lecture~\cite[\S~I:6]{Feynman} to freshmen in
+physics, on probability, on behalf of Richard~P. Feynman at Caltech in the fall
+of~1961. The lecture is a classic and is recommended to every reader
+who can conveniently lay hands on a
+copy---recommended among other reasons because the lecture lends needed context
to the rather abstruse mathematics this chapter has presented to the
present point. One section of the lecture begins, ``There is [an]
interesting problem in which the idea of probability is required. It is
@@ 963,34 +1260,48 @@ the problem of the `random walk.' In it
\emph{randomly,} determined, for example, by the toss of a coin. How
shall we describe the resulting motion?''
\index{straying}
\index{likely straying}
+\index{stray}
+\index{likely stray}
Sands goes on to observe that, though one cannot guess whether the
`player' will have gone forward or backward after~$N$ stepsand,
+``player'' will have gone forward or backward after~$N$ stepsand,
indeed, that in the absence of other information one must expect
$\langle D_N \rangle = 0$, zero net progress``[one has] the feeling
+that $\langle D_N \rangle = 0$, indicating zero expected
+net progress---``[one has] the feeling
that as~$N$ increases, [the `player'] is likely to have strayed farther
from the starting point.'' Sands is right, but if $\langle D_N \rangle$
is not a suitable measure of this ``likely straying,'' so to speak, then
+is not a suitable measure of this ``likely stray,'' so to speak, then
what would be?
The measure $\langle\left D_N \right\rangle$ might recommend itself,
but this being nonanalytic (\S\S~\ref{alggeo:225.3}
and~\ref{taylor:320}) proves inconvenient in practice (you can try it if
you like). The success of the leastsquares technique of
\S~\ref{mtxinv:320} however encourages us to try the measure $\langle
D_N^2 \rangle$. The squared distance~$D_N^2$ is nonnegative in every
instance and also is analytic, so its expected value $\langle D_N^2
\rangle$ proves a most convenient measure of ``likely straying.'' It is
moreover a measure universally accepted among scientists and engineers,
and it is the measure this book will adopt.

Sands notes that, if the symbol $D_{N1}$ represents the `player's'
position after $N1$ steps, his position after~$N$ steps must be
+The measure $\langle\left D_N \right\rangle$ might recommend itself
+but this, being nonanalytic (\S\S~\ref{alggeo:225.3}
+and~\ref{taylor:320}), proves inconvenient in practice (you can try it if
+you like). Fortunately, an alternate, analytic measure, $\langle
+D_N^2 \rangle$, presents itself. The success of the leastsquares
+technique of \S~\ref{mtxinv:320} encourages us to try it.
+When tried, the alternate, analytic measure prospers.
+
+% diagn: review this fairly heavily revised paragraph
+\index{index of stray}
+Section~\ref{prob:070} has actually already introduced $\langle D_N^2
+\rangle$ in another guise as~$\sigma^2$ (the~$\sigma^2$ in this context
+being the standard deviation of an ensemble of a conceptually infinite
+number of instances of~$D_N$, each instance being the sum of~$N$ random
+steps).
+The squared distance~$D_N^2$ is nonnegative, a quality necessary to a
+good index of stray. The squared distance~$D_N^2$ and its
+expectance $\langle D_N^2 \rangle$ are easy to calculate
+and, comparatively, also convenient to use. Moreover, scientists and
+engineers have long been used to accepting such quantities and
+equivalents like~$\sigma^2$ as statistical characterizations.
+We will use them for these reasons among others.
+
+In his lecture, Sands observes that, if the symbol $D_{N1}$ represents
+the ``player's'' position after $N-1$ steps, if the next step is~$\pm 1$ in
+size, then the ``player's'' position after~$N$ steps must be
$D_N = D_{N1} \pm 1$. The expected value $\langle D_N \rangle = 0$ is
uninteresting as we said, but the expected value $\langle D_N^2 \rangle$
is interesting. And what is this expected value? Sands finds two
possibilities: either the `player' steps forward on his $N$th step, in
+possibilities: either the ``player'' steps forward on his $N$th step, in
which case
\[
\big\langle D_N^2 \big\rangle = \big\langle (D_{N1} + 1)^2 \big\rangle
@@ 1042,15 +1353,19 @@ that
\sigma_o^2 &= (1p_o)p_o.
\end{split}
\eq
As an example of the use, consider a realestate agent who expects to
sell one house per~10 times he shows a prospective buyer a house:
+As an example of the use,\footnote{Decimal notation is again here
+employed. Maybe, abstractly, $2\pi \approx \mbox{0x6.487F}$, but even a
+hexadecimal enthusiast is unlikely to numeralize realestate sales in
+hexadecimal.}
+consider a realestate agent who expects to
+sell one house per~10 times he shows a house to a prospective buyer:
$p_o=1/10=0.10$. The agent's expected result from a single showing,
according to~(\ref{prob:300:55}), is to sell $\mu_o \pm \sigma_o = 0.10
\pm 0.30$ of a house. The agent's expected result from $N = 400$
showings, according to~(\ref{prob:070:40}), is to sell $\mu \pm \sigma =
N\mu_o \pm \left(\sqrt N\right)\sigma_o = 40.0 \pm 6.0$ houses. Such a
conclusion, of course, is valid only to the extent to which the model is
validwhich in a realestate agent's case may be \emph{not very}but
+validwhich in a realestate agent's case might be \emph{not very}but
that nevertheless is how the mathematics of it work.
\index{normal distribution!convergence toward}
@@ 1058,43 +1373,25 @@ As the number~$N$ of attempts grows larg
distribution $f(x)$ of the number of successes begins more and more to
take on the bellshape of Fig.~\ref{prob:normdistfig}'s normal
distribution. Indeed, this makes sense, for one would expect the
aforementioned realestate agent to have a relatively high probability of
selling~39, 40 or~41 houses but a low probability to sell~10 or~70;
thus one would expect $f(x)$ to take on something rather like the
bellshape. If for~400 showings the distribution is $f(x)$ then,
according to~(\ref{prob:050:40}), for $800=400+400$ showings the
distribution must be $f(x) \ast f(x)$. Moreover, since the only known
PDF which, when convolved with itself, does not change shape is the
normal distribution of \S~\ref{prob:100}, one infers that the normal
distribution is the PDF toward which the realestate agent's
distributionand indeed most other distributions of sums of random
variablesmust converge%
\footnote{
+aforementioned realestate agent to enjoy a relatively high probability of
+selling~39, 40 or~41 houses but a low probability to sell~10 or~70.
+Of course, not all distributions that make~39, 40 or~41 more
+likely than~10 or~70 are normal; but the logic of
+\S~\ref{prob:100} does suggest that, if there \emph{were} a shape toward
+which such a distribution tended as~$N$ increased, then that shape could
+hardly be other than the shape of the normal distribution. We will
+leave the argument in that form.\footnote{%
Admittedly, the argument, which supposes that all (or at least most)
aggregate PDFs must tend toward some common shape as~$N$ grows large,
is somewhat specious, or at least unrigorousthough on the other
hand it is hard to imagine any plausible conclusion other than the
 correct one the argument reachesbut one can construct an alternate
+ correct one the argument reachesbut one might construct an alternate
though tedious argument toward the normal distribution on the
 following basis. Counting permutations per \S~\ref{drvtv:220}, derive
 an exact expression for the probability of~$k$ successes in~$N$ tries,
 which is $P_k = \cmbl{N}{k}p^k(1p)^{Nk}$. Considering also the
 probabilities of $k1$ and $k+1$ successes in~$N$ tries, approximate
 the logarithmic derivative of~$P_k$ per~(\ref{drvtv:240.40:10}) as
 $(\pl P_k/\pl k)/P_k \approx (P_{k+1}P_{k1})/2P_k$ or,
 betterremembering that suitable arithmetical approximations are
 permissible in such workas $(\pl P_k/\pl k)/P_k \approx
 (P_{k+1/2}P_{k1/2})/P_k$. Change $x \la (k 
 p_oN)/\sqrt{(1p_o)p_oN}$. Discover a continuous, analytic function,
 which is $f(x) = C\exp(ax^2)$, that for large~$N$ has a similar
 logarithmic derivative in the distribution's peak region. To render
 the arithmetic tractable one might try first the specific case of
 $p=1/2$ and make various arithmetical approximations as one goes, but
 to fill in the tedious details is left as an exercise to the
 interested (penitent?) reader. The author confesses that he prefers
 the specious argument of the narrative.
+ pattern of \S~\ref{prob:100.60} or on another pattern. To fill in the
+ tedious details is left as an exercise to the interested (penitent?)
+ reader. The author confesses that he prefers the specious argument of
+ the narrative.%
}
as $N \ra \infty$.
\index{distribution!default}
For such reasons, applications tend to approximate sums of several
@@ 1122,7 +1419,7 @@ of the most prominent.
\index{computer!pseudorandomnumber generator}
\index{pseudorandom number}
The \emph{uniform distribution} can be defined in any several forms, but
+The \emph{uniform distribution} can be defined in any of several forms, but
the conventional form is
\bq{prob:400:10}
 f(x) = \Pi\left(x-\frac 12\right) =
@@ 1148,23 +1445,23 @@ transformation of \S~\ref{prob:410}.
\index{retail establishment}
\index{customer}
\index{failure of a mechanical part}
The \emph{exponential distribution} is%
\footnote{
 Unlike the section's other subsections, this one explicitly includes
 the mean~$\mu$ in the expression~(\ref{prob:400:20}) of its
 distribution. The inclusion of~$\mu$ here is admittedly inconsistent.
 The reader who prefers to do so can mentally set $\mu = 1$ and read
 the section in that light. However, in typical applications the
 entire point of choosing the exponential distribution may be to
 specify~$\mu$, or to infer it. The exponential distribution is
 inherently ``$\mu$focused,'' so to speak. The author prefers to
 leave the~$\mu$ in the expression for this reason.
}
+The \emph{exponential distribution} is
+%\footnote{
+% Unlike the section's other subsections, this one explicitly includes
+% the mean~$\mu$ in the expression~(\ref{prob:400:20}) of its
+% distribution. The inclusion of~$\mu$ here is admittedly inconsistent.
+% The reader who prefers to do so can mentally set $\mu = 1$ and read
+% the section in that light. However, in typical applications the
+% entire point of choosing the exponential distribution may be to
+% specify~$\mu$, or to infer it. The exponential distribution is
+% inherently ``$\mu$focused,'' so to speak. The author prefers to
+% leave the~$\mu$ in the expression for this reason.
+%}
\bq{prob:400:20}
 f(x) =
 \frac{1}{\mu}\exp\left(\frac{x}{\mu}\right), \ \ x \ge 0,
+ f(x) = \frac{u(t)}{\mu}\exp\left(-\frac{x}{\mu}\right),
\eq
whose mean is
+the $u(t)$ being Heaviside's unit step~(\ref{integ:670:10}).
+The distribution's mean is
\[
 \frac 1 \mu \int_0^\infty \exp\left( -\frac x \mu \right) x\,dx
=
@@ 1173,7 +1470,7 @@ whose mean is
 \right|_0^\infty
= \mu
\]
as advertised and whose standard deviation is such that
+as advertised and its standard deviation is such that
\bqb
\sigma^2
&=&
@@ 1181,10 +1478,11 @@ as advertised and whose standard deviati
\\&=&
\left.
  -\exp\left( -\frac x \mu \right) ( x^2 + \mu^2 )
  \right|_0^\infty,
+ \right|_0^\infty
\eqb
(the integration by the method of unknown coefficients of
\S~\ref{inttx:240}), which implies that
+\S~\ref{inttx:240} or, quicker, by Table~\ref{inttx:470:tbl}), which
+implies that
\bq{prob:400:25}
\sigma = \mu.
\eq
@@ 1192,14 +1490,68 @@ The exponential's CDF~(\ref{prob:CDF}) a
quantile~(\ref{prob:quantile}) are evidently
\bq{prob:400:28}
\begin{split}
  F(x) &= 1 - \exp\left( -\frac{x}{\mu} \right), \\
  F^{-1}(u) &= -\mu\ln(1-u).
+ F(x) &= 1 - \exp\left( -\frac{x}{\mu} \right), \ \ x \ge 0;\\
+ F^{-1}(v) &= -\mu\ln(1-v).
\end{split}
\eq
Among other effects, the exponential distribution models the delay until
some imminent event like a mechanical bearing's failure or the
arrival of a retail establishment's next customer.
+\subsection{The Poisson distribution}
+\label{prob:400.25}
+\index{Poisson distribution}
+\index{distribution!Poisson}
+\index{Poisson, Sim\'eon Denis (1781--1840)}
+
+The Poisson distribution is\footnote{\cite[chapter~6]{Bulmer}}
+\bq{prob:400:p25}
+ f(x) = \exp(-\mu)\sum_{k=0}^\infty\frac{\mu^x\delta(x-k)}{x!}.
+\eq
+It comes from the consideration of a large number $N \gg 1$ of
+individually unlikely trials, each trial having a probability~$0 < \ep
+\ll 1$ of success, such that the expected number of successes is $\mu =
+\ep N$.
+\begin{itemize}
+ \item The chance that no trial will succeed is evidently
+ \[
+ \lim_{\eta \ra 0^{+}} \int_{-\eta}^\eta f(x)dx = (1-\ep)^N
+ \approx \exp(-\ep N) = \exp(-\mu).
+ \]
+ \item The chance that exactly one trial will succeed is
+ \[
+ \begin{split}
+ \lim_{\eta \ra 0^{+}} \int_{1-\eta}^{1+\eta} f(x)dx &= \cmb{N}{1}(\ep)(1-\ep)^{N-1}
+ \\&\approx \ep N\exp(-\ep N) = \mu\exp(-\mu).
+ \end{split}
+ \]
+ \item The chance that exactly two trials will succeed is
+ \[
+ \begin{split}
+ \lim_{\eta \ra 0^{+}} \int_{2-\eta}^{2+\eta} f(x)dx &= \cmb{N}{2}(\ep^2)(1-\ep)^{N-2}
+ \\&\approx \frac{(\ep N)^2}{2!}\exp(-\ep N) = \frac{\mu^2\exp(-\mu)}{2!}.
+ \end{split}
+ \]
+ \item And so on.
+\end{itemize}
+In the limit as $N \ra \infty$ and $\ep \ra 0^{+}$, the product $\mu =
+\ep N$ remaining finite, the approximations become exact
+and~(\ref{prob:400:p25}) results.
+
+Integrating~(\ref{prob:400:p25}) to check,
+\bqb
+ \int_{-\infty}^{\infty} f(x) dx
+ &=& \exp(-\mu)\int_{-\infty}^{\infty}\sum_{k=0}^\infty\frac{\mu^x\delta(x-k)}{x!} dx
+ \\&=& \exp(-\mu)\sum_{k=0}^\infty\int_{-\infty}^{\infty}\frac{\mu^x\delta(x-k)}{x!} dx
+ \\&=& \exp(-\mu)\sum_{k=0}^\infty\frac{\mu^k}{k!}
+ = \exp(-\mu)\exp(\mu) = 1,
+\eqb
+as~(\ref{prob:050:10}) requires.
+
+Compared to the exponential distribution (\S~\ref{prob:400.20}), the
+Poisson distribution serves to model for example the number of customers
+to arrive at a retail establishment during the next hour.
+
\subsection{The Rayleigh distribution}
\label{prob:400.30}
\index{Rayleigh distribution}
@@ 1238,21 +1590,20 @@ whence
\eqb
which implies the distribution
\bq{prob:400:30}
 f(\rho) = \rho \exp\left( \frac{\rho^2}{2} \right),
 \ \ \rho \ge 0.
+ f(\rho) = u(\rho) \rho \exp\left( -\frac{\rho^2}{2} \right).
\eq
This is the Rayleigh distribution. That it is a proper distribution
according to~(\ref{prob:050:10}) is proved by evaluating the integral
\bq{prob:400:32}
\int_0^{\infty} f(\rho) \,d\rho = 1
\eq
using the method of \S~\ref{fouri:130}.
+using part of the method of \S~\ref{fouri:130}.
Rayleigh's CDF~(\ref{prob:CDF}) and quantile~(\ref{prob:quantile}) are
evidently
+evidently\footnote{\cite[\S~5.2]{Papoulis}}
\bq{prob:400:34}
\begin{split}
  F(\rho) &= 1 - \exp\left( -\frac{\rho^2}{2} \right), \\
  F^{-1}(u) &= \sqrt{-2\ln(1-u)}.
+ F(\rho) &= 1 - \exp\left( -\frac{\rho^2}{2} \right), \ \ \rho \ge 0;\\
+ F^{-1}(v) &= \sqrt{-2\ln(1-v)}.
\end{split}
\eq
The Rayleigh distribution models among others the distance~$\rho$ by
@@ 1260,10 +1611,52 @@ which a missile might miss its target.
\index{azimuth}
Incidentally, there is nothing in the mathematics to favor any
particular value of~$\phi$ over another,~$\phi$ being the azimuth toward
+particular value of~$\phi$ over another,~$\phi$ being the azimuth at
which the missile misses, for the integrand $\exp(-\rho^2/2) \rho
\,d\rho\,d\phi$ above includes no~$\phi$; so, unlike~$\rho$, $\phi$ by
symmetry will be uniformly distributed.
+\,d\rho\,d\phi$ includes no~$\phi$. The azimuth~$\phi$ must by
+symmetry therefore be uniformly distributed.
+
+Rayleigh's mean and standard deviation are computed
+via~(\ref{prob:stat}) to be
+\bq{prob:400:35}
+ \begin{split}
+ \mu &= \frac{\sqrt{2\pi}}{2}, \\
+ \sigma^2 &= 2 - \frac{2\pi}{4}.
+ \end{split}
+\eq
+by
+\[
+ \mu = \int_0^\infty \rho^2 \exp\left( -\frac{\rho^2}{2} \right)
+ \,d\rho = \frac{\sqrt{2\pi}}{2}
+\]
+(compare eqn.~\ref{prob:100:10}, observing however that the present
+integral integrates over only half the domain) and
+\bqb
+ \sigma^2 &=& \int_0^\infty \left(\rho - \frac{\sqrt{2\pi}}{2}\right)^2
+ \rho \exp\left( -\frac{\rho^2}{2} \right) \,d\rho
+ \\&=& \int_0^\infty
+ \rho^3 \exp\left( -\frac{\rho^2}{2} \right) \,d\rho
+ \\&&\quad\mbox{} -\sqrt{2\pi}\int_0^\infty
+ \rho^2 \exp\left( -\frac{\rho^2}{2} \right) \,d\rho
+ \\&&\quad\mbox{} +\frac{2\pi}{4}\int_0^\infty
+ \rho \exp\left( -\frac{\rho^2}{2} \right) \,d\rho
+ \\&=& \int_0^\infty
+ \rho^3 \exp\left( -\frac{\rho^2}{2} \right) \,d\rho
+ - \frac{2\pi}{2} + \frac{2\pi}{4}
+ \\&=& -\int_{\rho=0}^\infty
+ \rho^2 \,d\left[\exp\left( -\frac{\rho^2}{2} \right)\right]
+ - \frac{2\pi}{4}
+ \\&=&
+ \makebox[3pt][c]{-}\left.\rho^2 \exp\left( -\frac{\rho^2}{2}\right)\right|_0^\infty
+ +\int_{\rho=0}^\infty
+ \exp\left( -\frac{\rho^2}{2} \right) \,d\left[\rho^2\right]
+ - \frac{2\pi}{4}
+ \\&=&
+ 0+2\int_0^\infty
+ \rho\exp\left( -\frac{\rho^2}{2} \right) \,d\rho
+ - \frac{2\pi}{4}
+ = 2 - \frac{2\pi}{4}.
+\eqb
\subsection{The Maxwell distribution}
\label{prob:400.40}
@@ 1282,45 +1675,48 @@ distribution which results, the Maxwell
 f(r) = \frac{2r^2}{\sqrt{2\pi}} \exp\left( -\frac{r^2}{2} \right),
\ \ r \ge 0,
\eq
which models, among others, the speed at which an air molecule might
+which models among others the speed at which an air molecule might
+\linebreak % bad break
travel.%
\footnote{\cite[eqn.~I:40.7]{Feynman}}
%\footnote{
% The section has omitted several reasonably wellknown distributions.
% As the section ends it will pass these over, except one, without
% special notice. The one is the \emph{chisquare} distribution
% \cite[Ch.~13]{Adler}. The reason the section has omitted the
+% \cite[chapter~13]{Adler}. The reason the section has omitted the
% chisquare is that the principal application of the chisquare lies in
% ``hypothesis testing,'' an application of statistical inference this
% book does not treat.
%}
\subsection{The lognormal distribution}
\label{prob:400.50}
\index{lognormal distribution}
\index{distribution!lognormal}

In the \emph{lognormal distribution,} it is not~$x$ but
\bq{prob:400:51}
 x_o \equiv \frac{\ln x}{\alpha}
\eq
that is normally distributed, a fairly common case. Setting $x =
g(x_o) = \exp \alpha x_o$ and $f_o(x_o) = \Omega(x_o)$
in~(\ref{prob:080:10}), one can express the lognormal distribution in
the form%
\footnote{\cite[Ch.~5]{Papoulis}}
\bq{prob:400:50}
 f(x) = \frac{1}{\alpha x}\Omega\left(\frac{\ln x}{\alpha}\right).
\eq
+%\subsection{The lognormal distribution}
+%\label{prob:400.50}
+%\index{lognormal distribution}
+%\index{distribution!lognormal}
+%
+%In the \emph{lognormal distribution,} it is not~$x$ but
+%\bq{prob:400:51}
+% y \equiv \frac{\ln x}{\alpha}
+%\eq
+%that is normally distributed, a fairly common case. Setting $x =
+%g(y) \equiv \exp \alpha y$ and $f_o(y) = \Omega(y)$
+%in~(\ref{prob:080:10}), one can express the lognormal distribution in
+%the form%
+%\footnote{\cite[eqn.~511]{Papoulis}}
+%\bq{prob:400:50}
+% f(x) = \frac{1}{\alpha x}\Omega\left(\frac{\ln x}{\alpha}\right).
+%\eq
% 
\section{The BoxMuller transformation}
\label{prob:410}
\index{BoxMuller transformation}
\index{transformation, BoxMuller}
\index{Box, G.E.P. (1919)}
\index{Muller, Mervin~E.}
+\index{transformation!BoxMuller}
+\index{Box, G.~E.~P.\ (1919--2013)}
+\index{Muller, Mervin~E.\ (1928--)}
+% Retain this note: on May 13, 2017, Dr. Muller replied to T. H. Black's
+% email, indicating that his date of birth is June 1, 1928.
\index{distribution!conversion between two}
\index{quantile!use of to convert between distributions}
@@ 1330,19 +1726,21 @@ Rayleigh. Unfortunately, we lack a quan
distribution. However, we can still convert uniform to normal by way of
Rayleigh as follows.
Section~\ref{prob:400.30} has shown how Rayleigh gives the
distance~$\rho$ by which a missile misses a target when each of~$x$
and~$y$ are normally distributed and, interestingly, how the
azimuth~$\phi$ is uniformly distributed under these conditions. Because
we know the quantiles, to convert a pair of instances~$u$ and~$v$ of a
uniformly distributed random variable to Rayleigh's distance and azimuth
is thus straightforward:%
\footnote{
+Section~\ref{prob:400.30} has associated the
+Rayleigh distribution with
+the distance~$\rho$ by which a missile misses its target, the~$x$
+and~$y$ coordinates of the missile's impact each being normally
+distributed over equal standard deviations.
+Section~\ref{prob:400.30} has further drawn out the
+uniform distribution of the impact's azimuth~$\phi$. Because
+we know Rayleigh's quantiles, we are able to convert a pair of
+instances~$u$ and~$v$ of a uniformly distributed random variable to
+Rayleigh's distance and azimuth by\footnote{%
One can eliminate a little trivial arithmetic by appropriate changes
of variable in~(\ref{prob:410:20}) like $u' \la 1u$, but to
do so saves little computational time and makes the derivation harder
to understand. Still, the interested reader might complete the
 improvement as an exercise.
+ improvement as an exercise.%
}
\bq{prob:410:20}
\begin{split}
@@ 1363,13 +1761,11 @@ lack an easy way to convert a single uni
instance, we can convert a \emph{pair} of uniform instances to a pair of
normal instances. Equations~(\ref{prob:410:20}) and~(\ref{prob:410:25})
are the \emph{BoxMuller transformation.}%
\footnote{\cite{EWW}}
+\footnote{\cite{Box/Muller}\cite{EWW}}
% 
\section[The normal CDF at large arguments]{%
 The normal cumulative distribution function at large arguments%
}
+\section{The normal CDF at large arguments}
\label{prob:750}
\index{normal distribution!cumulative distribution function of}
\index{cumulative distribution function!of the normal distribution}
@@ 1437,7 +1833,7 @@ one might prefer a more efficient formul
Such methods prompt one to wonder how much useful mathematics our
 civilization should have forgone had Leonhard Euler (1707--1783),
 Carl Friedrich Gauss (1777--1855) and other hardy mathematical
 minds of the past computers to lean upon.
+ minds of the past had computers to lean on.
}
\item
One might regard a prudent measure of elegance, even in
@@ 1514,9 +1910,8 @@ in which the convenient notation
 = (m)(m-2)\cdots(6)(4)(2) &\mbox{for even~$m$,}
\end{cases}
\eq
is introduced.%
\cite[Exercise~2.2.15]{Andrews}
The last expression for $1  F_\Omega(x)$ is better written
+is introduced.\footnote{\cite[Exercise~2.2.15]{Andrews}}
+The last expression for $1  F_\Omega(x)$ is better written,
\bqa
1  F_\Omega(x) &=& \frac{\Omega(x)}{x} [ S_n(x) + R_n(x) ],
\label{prob:750:20}\\
@@ 1529,12 +1924,9 @@ The last expression for $1  F_\Omega(x)
\xn
\eqa
The series $S_n(x)$ is an \emph{asymptotic series,} also
called an \emph{semiconvergent series.}%
\footnote{
 % diagn: complete the citation here?
 As professional use them, the adjectives \emph{asymptotic} and
 \emph{semiconvergent} apparently can differ slightly in
 meaning~\cite{Andrews}. We'll not worry about that here.
+called a \emph{semiconvergent series.}%
+\footnote{\label{prob:750fn1}%
+ \cite[\S~1.4.1]{Andrews}%
}
So long as
$x \gg 1$, the first several terms of the series will
@@ 1554,7 +1946,7 @@ Fortunately, nothing requires us to let
free to choose~$n$ strategically as we likefor instance to exclude
from~$S_n$ the series' least term in magnitude and all the terms
following. So excluding leaves us with the problem of evaluating the
integral~$S_n$, but see:
+integral~$R_n$, but see:
\bqb
 \left|R_n(x)\right|
&\le&
@@ 1578,7 +1970,7 @@ Using~(\ref{prob:400:30}) and~(\ref{prob
 \left|R_n(x)\right|
 \le
 \frac{(2n-1)!!}{\left|x\right|^{2n}},
 \ \ \Im(x) = 0,
+ \ \ \Im(x) = 0,\ \Re(x) > 0,
\eq
which in view of~(\ref{prob:750:20}) has that the
magnitude $\left|R_n\right|$ of the error due to truncating the series
@@ 1588,39 +1980,90 @@ sought to estimate the CDF accurately fo
% 
+\section{Asymptotic series}
+\label{prob:760}
+\index{asymptotic series}
+\index{series!asymptotic}
+
+\index{truncation}
+\index{series!truncation of}
+Section~\ref{prob:750} has incidentally introduced the \emph{asymptotic
+series} and has shown how to treat it.
+
+Asymptotic series are strange. They diverge, but only after
+approaching a sum of interest. Some asymptotic series approach
+the sum of interest quite closely, and moreover do so in such a way that
+the closenessthat is, the error in the sumcan with sufficient
+effort be quantified. The error in the sum of the asymptotic series of
+\S~\ref{prob:750} has been found not to exceed the magnitude of the
+first omitted term; and though one may have to prove it specially for
+each such series, various series one encounters in practice tend to
+respect bounds of the same kind.
+
+\index{semiconvergent series}
+\index{series!semiconvergent}
+As \S~\ref{prob:750} has noted, asymptotic series are sometimes
+alternately called \emph{semiconvergent series.}\footnote{See
+footnote~\ref{prob:750fn1}.}
+
+An ordinary, convergent series is usually preferable to
+an asymptotic series, of course, especially in the subdomain near the
+convergent series' expansion point (\S~\ref{taylor:317}).
+However, a convergent series is not always available; and, even when it
+is, its expansion point may lie so distant that the series becomes
+numerically impractical to total.
+
+An asymptotic series can fill the gap.
+
+Aside from whatever practical applications an asymptotic series can fill,
+this writer finds the topic of asymptotic series fascinating. The
+topic is curious, is it not? How can a \emph{divergent series} reach a
+definite total? The answer seems to be: it cannot reach a definite
+total but can draw arbitrarily close to one. In~(\ref{prob:750:20})
+and~(\ref{prob:750:25}) for example, the larger the argument, the
+closer the draw. It is a paradox yet, surprisingly, it works.
+
+Asymptotic series arise in the study and application of special functions,
+including (as we have seen) the $\Omega(\cdot)$ of the present chapter.
+For this reason and maybe others, the applied mathematician will exercise and
+exploit asymptotic series from time to time.
+
+% 
+
\section{The normal quantile}
\label{prob:775}
\index{normal distribution!quantile of}
\index{quantile!of the normal distribution}
\index{NewtonRaphson iteration}
Though no straightforward quantile~(\ref{prob:quantile}) formula
+Though no straightforward quantile formula to satisfy~(\ref{prob:quantile})
for the normal distribution seems to be known, nothing prevents one from
calculating the quantile via the NewtonRaphson
iteration~(\ref{drvtv:NR})%
\footnote{
When implementing numerical algorithms like these on the computer one
 should do it intelligently. For example, if $F_\Omega(x_k)$ and~$u$ are
 both likely to be close to~1, do not ask the computer to calculate
+ should do it intelligently. For example, if $F_\Omega(x_k)$ and~$v$ are
+ both likely to be close to~1, then do not ask the computer to calculate
and/or store these quantities. Rather, ask it to calculate and/or
 store $1F_\Omega(x_k)$ and $1u$. Then, when~(\ref{prob:775:10}) instructs
 you to calculate a quantity like $F_\Omega(x_k)u$, let the computer instead
 calculate $[1u][1F_\Omega(x_k)]$, which is arithmetically no different but
+ store $1-F_\Omega(x_k)$ and $1-v$. Then, when~(\ref{prob:775:10}) instructs
+ you to calculate a quantity like $F_\Omega(x_k)-v$, let the computer instead
+ calculate $[1-v]-[1-F_\Omega(x_k)]$, which is arithmetically no different but
numerically, on the computer, much more precise.
}
\bq{prob:775:10}
\begin{split}
 x_{k+1} &= x_k  \frac{F_\Omega(x_k)u}{\Omega(x_k)}, \\
 F_\Omega^{1}(u) &= \lim_{k \ra \infty} x_k, \\
+ x_{k+1} &= x_k - \frac{F_\Omega(x_k)-v}{\Omega(x_k)}, \\
+ F_\Omega^{-1}(v) &= \lim_{k \ra \infty} x_k, \\
x_0 &= 0,
\end{split}
\eq
where $F_\Omega(x)$ is as given by~(\ref{prob:100:40})
and/or~(\ref{prob:750:20}) and $\Omega(x)$, naturally, is as given
+and/or~(\ref{prob:750:20}) and where $\Omega(x)$ is, as usual, as given
by~(\ref{prob:normdist}). The shape of the normal CDF as seen in
Fig.~\ref{prob:normdistfig}curving downward traveling right from
$x=0$, upward when traveling left, evidently guarantees convergence
per Fig.~\ref{drvtv:270:fig1}.
+Fig.~\ref{prob:normdistfig} on
+page~\pageref{prob:normdistfig}curving downward traveling right from
+$x=0$, upward when traveling leftevidently guarantees convergence
+per Fig.~\ref{drvtv:270:fig1}, page~\pageref{drvtv:270:fig1}.
\index{lazy convergence}
\index{convergence!lazy}
@@ 1630,7 +2073,7 @@ per Fig.~\ref{drvtv:270:fig1}.
In the largeargument limit,
\[
\begin{split}
 1u &\ll 1, \\
+ 1-v &\ll 1, \\
x &\gg 1;
\end{split}
\]
@@ 1643,32 +2086,32 @@ Substituting this into~(\ref{prob:775:10
\bqb
 x_{k+1} &\approx&
 x_k - \frac{1}{\Omega(x_k)}\left[
 1 - u - \frac{\Omega(x_k)}{x_k}
+ 1 - v - \frac{\Omega(x_k)}{x_k}
 \left( 1 - \frac{1}{x_k^2} + \cdots \right)
 \right]
 \\&\approx&
 x_k - \frac{1-u}{\Omega(x_k)}
+ x_k - \frac{1-v}{\Omega(x_k)}
 + \frac{1}{x_k} - \frac{1}{x_k^3} + \cdots
 \\&\approx&
 x_k - \frac{\left(\sqrt{2\pi}\right)(1-u)}{1 - x_k^2/2 + \cdots}
+ x_k - \frac{\left(\sqrt{2\pi}\right)(1-v)}{1 - x_k^2/2 + \cdots}
 + \frac{1}{x_k} - \frac{1}{x_k^3} + \cdots
 \\&\approx&
 x_k - \bigg(\sqrt{2\pi}\bigg)\bigg(1-u\bigg)\bigg(1 + \frac{x_k^2}{2} + \cdots \bigg)
+ x_k - \bigg(\sqrt{2\pi}\bigg)\bigg(1-v\bigg)\bigg(1 + \frac{x_k^2}{2} + \cdots \bigg)
 + \frac{1}{x_k} - \frac{1}{x_k^3} + \cdots
 \\&\approx&
 x_k - \bigg(\sqrt{2\pi}\bigg)\bigg(1-u\bigg)
+ x_k - \bigg(\sqrt{2\pi}\bigg)\bigg(1-v\bigg)
 + \frac{1}{x_k} + \cdots,
\eqb
suggesting somewhat lazy, but usually acceptable convergence in domains
of typical interest (the convergence might be unacceptable if, for
example, $x>\mbox{0x40}$, but the writer has never encountered an
application of the normal distribution $\Omega(x)$ or its incidents at
+application of the normal distribution $\Omega[x]$ or its incidents at
such large values of~$x$). If unacceptable, various stratagems might be
tried to accelerate the NewtonRaphson, orif you have no need to
impress anyone with the pure elegance of your technique but only want
the right answer reasonably fastyou might just search for the root in
the na\"ive way, trying $F_\Omega(2^0)$, $F_\Omega(2^1)$,
$F_\Omega(2^2)$ and so on until identifying a bracket $F_\Omega(2^{k1}) < u
+$F_\Omega(2^2)$ and so on until identifying a bracket $F_\Omega(2^{k-1}) < v
\le F_\Omega(2^k)$; then dividing the
bracket in half, then in half again, then again and again until
satisfied with the accuracy thus achieved, or until the bracket were
@@ 1835,7 +2278,7 @@ choice of technique.%
%\footnote{
% The inertial frame of reference is a powerful engine for the
% generation of physical insight
% % diagn
+% % diagn
% (and an equally powerful engine for the generation of metaphysical
% confusion, when the inertial frame is mistaken for a teleological
% principle, but let that pass~\cite{Feser}),
@@ 1917,7 +2360,7 @@ choice of technique.%
%\eq
%the particles traveling in unbiasedly random directions.
%Either~(\ref{prob:900:520}) or
%% bad break
+%% bad break
%(\ref{prob:900:530})
%thus suffices to state the conjecture. We will
%prefer~(\ref{prob:900:530}).
diff pruN 0.53.201204142/tex/pstxkey.sty 0.56.20180123.12/tex/pstxkey.sty
 0.53.201204142/tex/pstxkey.sty 20120415 16:42:12.000000000 +0000
+++ 0.56.20180123.12/tex/pstxkey.sty 19700101 00:00:00.000000000 +0000
@@ 1,46 +0,0 @@
%%
%% This is file `pstxkey.sty',
%% generated with the docstrip utility.
%%
%% The original source files were:
%%
%% xkeyval.dtx (with options: `pxklatex')
%%
%% 
%% Copyright (C) 20042008 Hendri Adriaens
%% 
%%
%% This work may be distributed and/or modified under the
%% conditions of the LaTeX Project Public License, either version 1.3
%% of this license or (at your option) any later version.
%% The latest version of this license is in
%% http://www.latexproject.org/lppl.txt
%% and version 1.3 or later is part of all distributions of LaTeX
%% version 2003/12/01 or later.
%%
%% This work has the LPPL maintenance status "maintained".
%%
%% This Current Maintainer of this work is Hendri Adriaens.
%%
%% This work consists of the file xkeyval.dtx and derived files
%% keyval.tex, xkvtxhdr.tex, xkeyval.sty, xkeyval.tex, xkvview.sty,
%% xkvltxp.sty, pstxkey.tex, pstxkey.sty, xkveca.cls, xkvecb.cls,
%% xkvesa.sty, xkvesb.sty, xkvesc.sty, xkvex1.tex, xkvex2.tex,
%% xkvex3.tex and xkvex4.tex.
%%
%% The following files constitute the xkeyval bundle and must be
%% distributed as a whole: readme, xkeyval.pdf, keyval.tex,
%% pstxkey.sty, pstxkey.tex, xkeyval.sty, xkeyval.tex, xkvview.sty,
%% xkvltxp.sty, xkvtxhdr.tex, pstxkey.dtx and xkeyval.dtx.
%%
\NeedsTeXFormat{LaTeX2e}[1995/12/01]
\ProvidesPackage{pstxkey}
 [2005/11/25 v1.6 package wrapper for pstxkey.tex (HA)]
\ifx\PSTXKeyLoaded\endinput\else\input pstxkey \fi
\DeclareOptionX*{%
 \PackageWarning{pstxkey}{Unknown option `\CurrentOption'}%
}
\ProcessOptionsX
\endinput
%%
%% End of file `pstxkey.sty'.
diff pruN 0.53.201204142/tex/pstxkey.tex 0.56.20180123.12/tex/pstxkey.tex
 0.53.201204142/tex/pstxkey.tex 20120415 16:42:12.000000000 +0000
+++ 0.56.20180123.12/tex/pstxkey.tex 19700101 00:00:00.000000000 +0000
@@ 1,70 +0,0 @@
%%
%% This is file `pstxkey.tex',
%% generated with the docstrip utility.
%%
%% The original source files were:
%%
%% xkeyval.dtx (with options: `pxktex')
%%
%% 
%% Copyright (C) 20042008 Hendri Adriaens
%% 
%%
%% This work may be distributed and/or modified under the
%% conditions of the LaTeX Project Public License, either version 1.3
%% of this license or (at your option) any later version.
%% The latest version of this license is in
%% http://www.latexproject.org/lppl.txt
%% and version 1.3 or later is part of all distributions of LaTeX
%% version 2003/12/01 or later.
%%
%% This work has the LPPL maintenance status "maintained".
%%
%% This Current Maintainer of this work is Hendri Adriaens.
%%
%% This work consists of the file xkeyval.dtx and derived files
%% keyval.tex, xkvtxhdr.tex, xkeyval.sty, xkeyval.tex, xkvview.sty,
%% xkvltxp.sty, pstxkey.tex, pstxkey.sty, xkveca.cls, xkvecb.cls,
%% xkvesa.sty, xkvesb.sty, xkvesc.sty, xkvex1.tex, xkvex2.tex,
%% xkvex3.tex and xkvex4.tex.
%%
%% The following files constitute the xkeyval bundle and must be
%% distributed as a whole: readme, xkeyval.pdf, keyval.tex,
%% pstxkey.sty, pstxkey.tex, xkeyval.sty, xkeyval.tex, xkvview.sty,
%% xkvltxp.sty, xkvtxhdr.tex, pstxkey.dtx and xkeyval.dtx.
%%
\csname PSTXKeyLoaded\endcsname
\let\PSTXKeyLoaded\endinput
\edef\PSTXKeyCatcodes{%
 \catcode`\noexpand\@\the\catcode`\@\relax
 \let\noexpand\PSTXKeyCatcodes\relax
}
\catcode`\@=11\relax
\ifx\ProvidesFile\@undefined
 \message{2005/11/25 v1.6 PSTricks specialization of xkeyval (HA)}
 \ifx\XKeyValLoaded\endinput\else\input xkeyval \fi
\else
 \ProvidesFile{pstxkey.tex}
 [2005/11/25 v1.6 PSTricks specialization of xkeyval (HA)]
 \@addtofilelist{pstxkey.tex}
 \RequirePackage{xkeyval}
\fi
\def\pst@famlist{}
\def\pst@addfams#1{%
 \XKV@for@n{#1}\XKV@tempa{%
 \@expandtwoargs\in@{,\XKV@tempa,}{,\pst@famlist,}%
 \ifin@\else\edef\pst@famlist{\pst@famlist,\XKV@tempa}\fi
 }%
}
\def\psset{%
 \expandafter\@testopt\expandafter\pss@t\expandafter{\pst@famlist}%
}
\def\pss@t[#1]#2{\setkeys+[psset]{#1}{#2}\ignorespaces}
\def\@psset#1,\@nil{%
 \edef\XKV@tempa{\noexpand\setkeys+[psset]{\pst@famlist}}%
 \XKV@tempa{#1}%
}
\PSTXKeyCatcodes
\endinput
%%
%% End of file `pstxkey.tex'.
diff pruN 0.53.201204142/tex/purec.tex 0.56.20180123.12/tex/purec.tex
 0.53.201204142/tex/purec.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/purec.tex 20180123 23:08:38.000000000 +0000
@@ 7,25 +7,24 @@
\index{professional mathematics}
\index{pure mathematics}
% diagn: The bib.bib might want a URL, and maybe even an entirely
% reconstructed entry, for Arnold.

At least three of the various disciplines of pure mathematics stand out
for their pedagogical intricacy and the theoretical depth of their core
results. The first of the three is number theory which, except for the
simple results of \S~\ref{noth:220}, scientists and engineers tend to
get by largely without. The second is matrix theory (Chs.~\ref{matrix}
+get by largely without. The second is matrix theory (chapters~\ref{matrix}
through~\ref{eigen}), a bruiser of a discipline the applied
mathematician of the computer agetry though he mightcan hardly
escape. The third is the pure theory of the complex variable.
The introduction's \S~\ref{intro:310} admires the beauty of the pure
theory of the complex variable even while admitting that ``its arc takes
off too late and flies too far from applications for such a book as
this.'' To develop the pure theory properly is a worthy booklength
+theory of the complex variable even while admitting that that
+theory's
+``arc regrettably takes off too late and flies too far from
+applications for such a book as this.''
+To develop the pure theory properly is a worthy booklength
endeavor of its own requiring moderately advanced preparation on its
reader's part which, however, the reader who has reached the end of the
present book's Ch.~\ref{inttx} possesses. If the writer doubts the
+present book's chapter~\ref{inttx} possesses. If the writer doubts the
strictly applied \emph{necessity} of the pure theory, still, he does not
doubt its health to one's overall mathematical formation. It provides
another way to think about complex numbers. Scientists and engineers
@@ 41,7 +40,7 @@ Taylor's are the results we will sketch.
presentations far more complete.
%(This presentation, as advertised, is just a sketch.)
%(The reader who has reached the end of the
%(Ch.~\ref{inttx} will understand already why the presentation is strictly
+%(chapter~\ref{inttx} will understand already why the presentation is strictly
%optional, interesting maybe but deemed unnecessary to the book's applied
%mathematical development.)
@@ 49,7 +48,7 @@ presentations far more complete.
\index{impressed residue theorem, Cauchy's}
\index{residue theorem, Cauchy's impressed}
\index{cleverness}
%\index{Arnold, D.N.}
+%\index{Arnold, D.~N.}
\emph{Cauchy's impressed residue theorem}%
\footnote{
This is not a standard name. Though they name various associated
@@ 69,7 +68,7 @@ is taken and if $f(z)$ is everywhere ana
within and along the contour. More than one proof of the theorem is
known, depending on the assumptions from which the mathematician prefers
to start, but this writer is partial to an instructively clever proof he
has learned from D.N.~Arnold%
+has learned from D.~N.\ Arnold%
\footnote{\cite[\S~III]{Arnold:1997}}
which goes as follows. Consider the function
\[
@@ 81,7 +80,7 @@ whose derivative with respect to the par
[(d/d\zeta)f(\zeta)]_{\zeta=(\cdot)}$ of
\S~\ref{drvtv:240} but the notation is handy here because it evades
the awkward circumlocution of changing $\zeta \la z$
 in~(\ref{purec:100:10}) and then writing
+ in~(\ref{purec:100:10}) and then writing,
\[
\frac{\pl g}{\pl t} = \frac{1}{i2\pi} \oint \frac{[(d/d\zeta)f(\zeta)]_{\zeta=z+(t)(wz)}}{wz} \,dw.
\]
@@ 100,11 +99,13 @@ We notice that this is
\\&=&
\frac{1}{i2\pi} \left\{
\frac{f[z+(t)(wz)]}{t}
 \right\}_a^b,
+ \right\}_{w=a}^b,
\eqb
where~$a$ and~$b$ respectively represent the contour integration's
beginning and ending points. But this integration ends where it begins,
so $a=b$ and the factor~$\{\cdot\}_a^b$ in braces vanishes, whereupon
+beginning and ending points. But this integration ends where it begins
+and its integrand (lacking a~$w$ in the denominator) is analytic within
+and along the contour, so $a=b$ and the factor~$\{\cdot\}_{w=a}^b$ in
+braces vanishes, whereupon
\[
\frac{\pl g}{\pl t} = 0,
\]
@@ 143,12 +144,12 @@ believed and, once there, is asked to ca
appendix only, the applied reader may feel easier about trusting it.)
\index{Goursat, Edouard (18581936)}
%\index{Hildebrand, F.B.}
+%\index{Hildebrand, F.~B.}
One could follow Arnold hence toward the proof of the theorem of one
Goursat and further toward various other interesting results, a path of
study the writer recommends to sufficiently interested readers:
see~\cite{Arnold:1997}. Being in a tremendous hurry ourselves, however,
we will leave Arnold and follow F.B.~Hildebrand%
+we will leave Arnold and follow F.~B.\ Hildebrand%
\footnote{\cite[\S~10.7]{Hildebrand}}
directly toward the Taylor series. Positing some expansion point~$z_o$
and then expanding~(\ref{purec:100:10}) geometrically
@@ 186,18 +187,17 @@ The important theoretical implication of
\emph{every function has a Taylor series about any point across whose
immediate neighborhood the function is analytic.} There evidently is no
such thing as an analytic function without a Taylor seriesa fact we
already knew if we have read and believed Ch.~\ref{taylor}, but some
+already knew if we have read and believed chapter~\ref{taylor}, but some
readers may find it more convincing this way.
Comparing~(\ref{purec:100:40}) against~(\ref{taylor:310:20}),
incidentally, we have also that
\bq{purec:100:50}
 \left.\frac{d^kf}{dz^k}\right_{z=z_o}
+ \left.\frac{d^k\!f}{dz^k}\right_{z=z_o}
= \frac{k!}{i2\pi} \oint \frac{f(w)}{(wz_o)^{k+1}} \,dw,
\eq
which is an alternate way to write~(\ref{taylor:350:30}).
% diagn: This paragraph is new and wants review.
Close inspection of the reasoning by which we have
reached~(\ref{purec:100:40}) reveals, quite by the way, at least one
additional result which in itself tends to vindicate the pure theory's
diff pruN 0.53.201204142/tex/specf.tex 0.56.20180123.12/tex/specf.tex
 0.53.201204142/tex/specf.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/specf.tex 20170628 20:55:19.000000000 +0000
@@ 4,7 +4,8 @@
\label{specf}
\index{special functions}
[This chapter is a rough, partial draft.]
+% diagn
+[This chapter is yet only a stub of a draft.]
No topic more stirs the pure mathematician's imagination than that of
number theory, so briefly addressed by this book's \S~\ref{noth:220}.
@@ 14,7 +15,7 @@ curiosity may be that of special functio
What is a \emph{special function?} Trouble arises at once, before the
first mathematical symbol strikes the page, for it is not easy to
discover a precise definition of the term. N.N.~Lebedev and Larry~C.
+discover a precise definition of the term. N.~N.\ Lebedev and Larry~C.
Andrews, authors respectively in Russian and English of two of the
better postWorld War~II books on the topic,%
\footnote{
@@ 46,7 +47,7 @@ on the author's desk correctly defines s
Here is what. A \emph{special function} is an analytic function
(\S~\ref{taylor:320})likely of a single and at most of a few complex
scalar variables, harder to analyze and evaluate than the
\emph{elementary functions} of Chs.~\ref{alggeo} through~\ref{cexp},
+\emph{elementary functions} of chapters~\ref{alggeo} through~\ref{cexp},
defined in a suitably canonical formthat serves to evaluate an
integral, to solve an integral equation,%
\footnote{
@@ 58,16 +59,16 @@ integral, to solve an integral equation,
in which the unknown is not a variable but a function $f(w)$ that
operates on a dummy variable of integration. Actually, we have
already met integral equations in disguise, in discretized form, in
 matrix notation (Chs.~\ref{matrix} through~\ref{eigen}) resembling
+ matrix notation (chapters~\ref{matrix} through~\ref{eigen}) resembling
\[
G\ve f = \ve h,
\]
which means no more than it seems to mean; so maybe integral equations
 are not so strange as they look. The integral equation is just the
+ are not so strange as they look. The sample integral equation is just the
matrix equation with the discrete vectors~$\ve f$ and~$\ve h$ replaced
by their continuous versions $\Delta w\,f(j\,\Delta w)$ and $\Delta
z\,h(i\,\Delta z)$ (the~$i$ representing not the imaginary unit here
 but just an index, as in Ch.~\ref{matrix}).
+ but just an index, as in chapter~\ref{matrix}).
}
or to solve a differential equation elementary functions alone cannot
evaluate or solve. Such a definition approximates at least the aspect
@@ 89,10 +90,21 @@ once.
\section{The Gaussian pulse and its moments}
\label{specf:220}
+\index{Gaussian pulse}
+\index{pulse, Gaussian}
We have already met
+We have already met the versatile Gaussian pulse,
\[
 \Omega(x) = \frac{\exp\left(x^2/2\right)}{\sqrt{2\pi}},
+ \Omega(t) = \frac{\exp\left(t^2/2\right)}{\sqrt{2\pi}},
\]
as~(\ref{prob:normdist}).
+in \S~\ref{fouri:130} and, earlier, in Fig.~\ref{fours:095:fig1}.
+Chapters~\ref{fours} and~\ref{fouri}, treating the series and transform
+of Fourier, have chiefly considered real~$t$, but one can more generally
+change $z \la t$ to give the function
+\bq{specf:220:gauss}
+ \Omega(z) \equiv \frac{\exp\left(z^2/2\right)}{\sqrt{2\pi}}
+\eq
+a complex argument~$z$.
+
+[Chapter to be \mbox{continued\ldots.}]
diff pruN 0.53.201204142/tex/stub.tex 0.56.20180123.12/tex/stub.tex
 0.53.201204142/tex/stub.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/stub.tex 20180123 22:11:12.000000000 +0000
@@ 9,135 +9,48 @@
\label{stub}
\setcounter{footnote}{0}
{
+{%
\newcounter{enumt}
\setcounter{enumt}{\thechapter}
 The following chapters are tentatively planned to complete the book.
+ \index{gamma function}
+ \index{cylinder function}
+ \index{Legendre polynomial}
+ \index{polynomial!Legendre}
+ \index{improvement of convergence}
+ \index{convergence!improvement of}
+ \index{conjugategradient algorithm}
+ \index{algorithm!conjugategradient}
+ \index{EulerMascheroni constant}
+ Future revisions of the book tentatively plan to add the following
+ chapters.
\begin{enumerate}
\setcounter{enumi}{\theenumt}
 \item \label{wave}
 The wave equation%
 \footnote{
 Chapter~\ref{wave} might begin with Poisson's equation and the
 corresponding static case. After treating the wave equation
 proper, it might end with the parabolic wave equation.
 }
+ \item \label{gamma}
+ The gamma function
\item \label{bessel}
Cylinder functions
\item \label{orthp}
 Orthogonal polynomials%
 \footnote{
 Chapter~\ref{orthp} would be pretty useless if it did not treat
 Legendre polynomials, so presumably it will do at least this.
 }%
 $\mbox{}^{,}$%
 \footnote{
 The author has not yet decided how to apportion the treatment of
 the wave equation in spherical geometries between
 Chs.~\ref{wave}, \ref{bessel} and~\ref{orthp}.
 }
+ Legendre polynomials
\item \label{xssc}
 Transformations to speed series convergence%
 \footnote{
 Chapter~\ref{xssc} is tentatively to treat at least the Poisson
 sum formula, Mosig's summationbyparts technique and, the
 author believes, the Watson transformation; plus maybe some
 others as seems appropriate. This might also be a good chapter
 in which to develop the infiniteproduct forms of the sine and
 the cosine and thence Euler's and Andrews' clever closedform
 series summations from~\cite[\S~1.7 and exercises]{Andrews} and
 maybe from other, similar sources.
 }
+ Acceleration of convergence
\item \label{cgrad}
The conjugategradient algorithm
 \item \label{rmrk}
 Remarks
\setcounter{enumt}{\theenumi}
 \end{enumerate}
 Chapters are likely yet to be inserted, removed, divided, combined and
 shuffled, but that's the planned outline at the moment.
+ \end{enumerate}%
+ Future revisions also tentatively plan to develop a method to
+ calculate the EulerMascheroni constant, but that method
+ is not expected to require a chapter of its own. It should fit in
+ one of the other chapters.%
+ %One would also like the book to treat a few further matters, such as
+ %Kepler's laws, but it is unclear how the book should fit some of these
+ %into its overall plan. A satisfactory presentation of Kepler's laws for
+ %example would want to be preceded by introductions to tensors,
+ %Hamiltonian mechanics and/or conic sectionswhich together could
+ %almost fill another book of their own. It would also point the way
+ %toward orbital mechanics, an exotic discipline that could fill
+ %yet a third book (and that the author lacks expertise to treat). A
+ %less satisfactory presentation could perhaps be given, but the outer
+ %limits of scope of such a book as this are somewhat arbitrary,
+ %anyway.
}
The book means to stop short of hypergeometric functions, parabolic
cylinder functions, selectivedimensional (Weyl and Sommerfeld) Fourier
transforms, wavelets, and iterative techniques more advanced than the
% bad break (but fixed)
con\ju\gategra\di\ent (the advanced iterative techniques being too
active an area of research for such a book as this yet to treat).
However, acknowledging the uniquely seminal historical importance
Kepler's laws, the book would like to add an appendix on the topic, to
precede the existing Appendix~\ref{hist}.

% Several of the tentatively planned chapters from~Ch.~\ref{iter} onward
% represent deep fields of study each wanting full books of their own. If
% written according to plan, few if any of these chapters would treat much
% more than a few general results from their respective fields.

% Yet further developments, if any, are hard to foresee.%
% \footnote{
% Any plansI should say, any wishesbeyond the topics listed are no
% better than daydreams. However, for my own notes if for no other
% reason, plausible topics include the following:
% Hamiltonian mechanics;
% electromagnetics;
% the statics of materials;
% the mechanics of materials;
% fluid mechanics;
% advanced special functions;
% thermodynamics and the mathematics of entropy;
% quantum mechanics;
% electric circuits;
% information theory;
% statistics.
% Life should be so long, eh? Well, we shall see. Like most authors
% perhaps, I write in my spare time, the supply of which is necessarily
% limited and unpredictable.
% (Family responsibilities and other duties take precedence. My wife
% says to me, ``You have a lot of chapters to write. It will take you a
% long time.'' She understates the problem.)
% The book targets the list ending in iterative techniques as its actual
% goal.
% }

% \subsubsection*{A personal note to the reader}
% \emph{Derivations of Applied Mathematics} belongs to the opensource
% tradition, which means that you as reader have a stake in it if you
% wish. If you have read the book, or a substantial fraction of it, as
% far as it has yet gone, then you can help to improve it. Check
% \texttt{http://www.derivations.org/} for the latest revision, then write
% me at \texttt{thb@derivations.org}. I would most expressly solicit your
% feedback on typos, misprints, false or missing symbols and the like;
% such errors only mar the manuscript, so no such correction is too small.
% On a higher plane, if you have found any part of the book unnecessarily
% confusing, please tell how so. On no particular plane, if you would
% tell me what you have done with your copy of the book, what you have
% learned from it, or how you have cited it, then write at your
% discretion.
%
% If you find a part of the book insufficiently rigorous, then that is
% another matter. I do not discourage such criticism and would be glad
% to hear it, but this book may not be well placed to meet it (the
% book might compromise by including a footnote that briefly suggests the
% outline of a more rigorous proof, but it tries not to distract the
% narrative by formalities that do not serve applications). If you want
% to detail H\"older spaces and Galois theory, or whatever, then my
% response is likely to be that there is already a surfeit of fine
% professional mathematics books in print; this just isn't that kind of
% book. On the other hand, the book does intend to derive every one of
% its results adequately from an applied perspective; if it fails to do so
% in your view then maybe you and I should discuss the matter. Finding
% the right balance is not always easy.
%
% At the time of this writing, readers are downloading the book at the
% rate of about four thousand copies per year directly through
% \texttt{derivations.org}. Some fraction of those, plus others who have
% installed the book as a Debian package or have acquired the book through
% secondary channels, actually have read it; now you stand among them.
%
% Write as appropriate. More to come.

% \nopagebreak
%
% \noindent\\
% THB

diff pruN 0.53.201204142/tex/taylor.tex 0.56.20180123.12/tex/taylor.tex
 0.53.201204142/tex/taylor.tex 20120415 15:43:56.000000000 +0000
+++ 0.56.20180123.12/tex/taylor.tex 20180116 16:16:38.000000000 +0000
@@ 7,15 +7,15 @@
\index{Taylor, Brook (16851731)}
\index{function!fitting of}
The Taylor series is a power series which fits a function in a
+The Taylor series is a power series that fits a function in a
limited domain neighborhood. Fitting a function in such a way
brings two advantages:
+brings at least two advantages:
\bi
\item it lets us take derivatives and integrals in the same
straightforward way~(\ref{drvtv:240:polyderivz})
 we take them with any power series; and
 \item it implies a simple procedure to calculate the function
 numerically.
+ one can take them given any power series; and
+ \item it implies a simple procedure to calculate values of the
+ function numerically.
\ei
This chapter introduces the Taylor series and some of its incidents. It
also derives Cauchy's integral formula. The chapter's early sections
@@ 40,11 +40,20 @@ prepare the ground for the treatment of
From another point of view, the chapter errs maybe toward too little
rigor. Some pretty constructs of pure mathematics serve the Taylor
series and Cauchy's integral formula. However, such constructs drive
 the applied mathematician on too long a detour. The chapter as
 written represents the most nearly satisfactory compromise the writer
 has been able to attain.
+ the applied mathematician on too long a detour (a detour
+ appendix~\ref{purec} briefly overviews). The chapter as written
+ represents the most nearly satisfactory compromise the writer has been
+ able to attain.%
}
+(The chapter's early sections, \S\S~\ref{taylor:314}
+and~\ref{taylor:317}, are thick with tiny algebraic details. The reader
+who does not wish, for now, to pick through tiny algebraic details can
+safely just skim the two sections and then turn ahead to start reading
+in \S~\ref{taylor:310}. Notwithstanding, the reader who would not skip
+details, howsoever tiny the details might be, can sharpen his pencil and
+continue to \S~\ref{taylor:314}, next.)
+
% 
\section{The powerseries expansion of $1/(1z)^{n+1}$}
@@ 110,12 +119,15 @@ or in other words that
a_{n(k1)} + a_{(n1)k} = a_{nk}.
\eq
Thinking of Pascal's triangle,~(\ref{taylor:314:30}) reminds
one of~(\ref{drvtv:220:37}), transcribed here in the symbols
+one of a formula of Table~\ref{drvtv:220:tbl}, transcribed here in the
+symbols
\bq{taylor:314:50}
\cmb{m1}{j1} + \cmb{m1}{j} = \cmb{m}{j},
\eq
except that~(\ref{taylor:314:30}) is not $a_{(m1)(j1)} + a_{(m1)j} =
a_{mj}$.
+%except that~(\ref{taylor:314:30}) is not $a_{(j1)(mj)} + a_{j(mj1)} =
+%a_{j(mj)}$.
\index{false try}
Various changes of variable are possible to
@@ 131,16 +143,17 @@ recommends itself. Thus changing in~(\r
\[
\cmb{n+k1}{k1} + \cmb{n+k1}{k} = \cmb{n+k}{k}.
\]
Transforming according to the rule~(\ref{drvtv:220:31}), this is
+Transforming according to a rule of
+Table~\ref{drvtv:220:tbl}, this is
\bq{taylor:314:51}
\cmb{n+[k1]}{n} + \cmb{[n1]+k}{n1} = \cmb{n+k}{n},
\eq
which fits~(\ref{taylor:314:30}) perfectly. Hence we conjecture that
+which fits~(\ref{taylor:314:30}) perfectly if
\bq{taylor:314:60}
 a_{nk} = \cmb{n+k}{n},
+ a_{nk} = \cmb{n+k}{n}.
\eq
which coefficients, applied to~(\ref{taylor:314:10}),
yield~(\ref{taylor:314:70}).
+Hence we conjecture that~(\ref{taylor:314:60}), applied
+to~(\ref{taylor:314:10}), would make~(\ref{taylor:314:70}) true.
Equation~(\ref{taylor:314:70}) is thus suggestive. It works at least
for the important case of $n=0$; this much is easy to test. In light
@@ 160,7 +173,7 @@ Consider the sum
\bq{taylor:315:10}
S_n \equiv \sum_{k=0}^{\infty} \cmb{n+k}{n} z^k.
\eq
Multiplying by $1z$ yields
+Multiplying by $1z$ yields that
\[
(1z)S_n = \sum_{k=0}^{\infty}
\left[ \cmb{n+k}{n}  \cmb{n+[k1]}{n} \right]
@@ 237,7 +250,7 @@ sum~(\ref{taylor:314:70}) converges.%
``uniform convergence,'' distinguishing it through a test devised by
Weierstrass from the weaker ``pointwise convergence''
\cite[\S~1.5]{Andrews}\@. The applied mathematician can profit
 substantially by learning the professional view in the matter, but
+ by learning the professional view in the matter but
the effect of trying to teach the professional view in a book like
this would not be pleasing. Here, we avoid error by keeping a
clear view of the physical phenomena the mathematics is meant to
@@ 247,7 +260,7 @@ sum~(\ref{tay