diff -pruN 0.15-1/debian/changelog 0.16-1/debian/changelog
--- 0.15-1/debian/changelog 2018-08-14 06:33:58.000000000 +0000
+++ 0.16-1/debian/changelog 2018-10-08 19:20:16.000000000 +0000
@@ -1,3 +1,9 @@
+python-numpysane (0.16-1) unstable; urgency=medium
+
+ * New upstream release
+
+ -- Dima Kogan <dkogan@debian.org>  Mon, 08 Oct 2018 12:20:16 -0700
+
python-numpysane (0.15-1) unstable; urgency=medium
* Initial release. (Closes: #893146)
diff -pruN 0.15-1/numpysane.py 0.16-1/numpysane.py
--- 0.15-1/numpysane.py 2018-07-11 22:48:43.000000000 +0000
+++ 0.16-1/numpysane.py 2018-10-08 19:17:30.000000000 +0000
@@ -8,7 +8,7 @@ r'''more-reasonable core functionality f
>>> a = np.arange(6).reshape(2,3)
>>> b = a + 100
- >>> row = a[0,:]
+ >>> row = a[0,:] + 1000
>>> a
array([[0, 1, 2],
@@ -59,10 +59,10 @@ have used PDL in the past.
Instead of writing a new module (this module), it would be really nice to simply
patch numpy to give everybody the more reasonable behavior. I'd be very happy to
do that, but the issues lie with some very core functionality, and any changes
-in behavior would likely break existing code. Any comments in how to achieve
-better behaviors in a less forky manner as welcome.
+in behavior would break existing code. Any comments on how to achieve better
+behaviors in a less forky manner are welcome.
-Finally, if the system DOES make sense in some way that I'm simply not
+Finally, if the existing system DOES make sense in some way that I'm simply not
understanding, I'm happy to listen. I have no intention to disparage anyone or
anything; I just want a more usable system for numerical computations.
@@ -87,8 +87,8 @@ identically-sized vectors (1-dimensional
arrays of shape (2,3,4), compute the 6 inner products of length-4 each, and
report the output in an array of shape (2,3). Numpy puts the most-significant
dimension at the end, which is why this isn't 12 inner products of length-2
-each. This is a semi-arbitrary design choice, which could have been made
-differently: PDL puts the most-significant dimension at the front, for instance.
+each. This is an arbitrary design choice, which could have been made
+differently: PDL puts the most-significant dimension at the front.
The user doesn't choose whether to use broadcasting or not: some functions
support it, and some do not. In PDL, broadcasting (called "threading" in that
@@ -101,11 +101,12 @@ By contrast, in numpy very few functions
the documentation is usually silent about the broadcasting status of a function
in question. And on top of THAT, broadcasting rules state that an array of
dimensions (n,m) is functionally identical to one of dimensions
-(1,1,1,....1,n,m). However, many numpy functions have special-case rules to
-create different behaviors for inputs with different numbers of dimensions, and
-this creates unexpected results. The effect of all this is a messy situation
-where the user is often not sure of the exact behavior of the functions they're
-calling, and trial and error is required to make the system do what one wants.
+(1,1,1,....1,n,m). Sadly, numpy does not respect its own broadcasting rules, and
+many functions have special-case logic to create different behaviors for inputs
+with different numbers of dimensions; and this creates unexpected results. The
+effect of all this is a messy situation where the user is often not sure of the
+exact behavior of the functions they're calling, and trial and error is required
+to make the system do what one wants.
*** Solution
This module contains functionality to make any arbitrary function broadcastable.
@@ -518,6 +519,9 @@ Broadcast-aware inner product. Identical
**** outer
Broadcast-aware outer product.
+**** norm2
+Broadcast-aware 2-norm. norm2(x) is identical to inner(x,x)
+
**** matmult
Broadcast-aware matrix multiplication
@@ -533,7 +537,7 @@ import inspect
from distutils.version import StrictVersion
# setup.py assumes the version is a simple string in '' quotes
-__version__ = '0.15'
+__version__ = '0.16'
# object needed for fancy slices. m[:] is exactly the same as m[_colon], but
# '_colon' is a normal python object, so it can be manipulated in ways that ':'
@@ -1720,7 +1724,7 @@ def dot(a, b, out=None):
array([5, 6, 7])
>>> nps.dot(a,b)
- array(20)
+ 20
this is identical to numpysane.inner(). for a conjugating version of this
function, use nps.vdot(). note that the numpy dot() has some special
@@ -1812,6 +1816,31 @@ def outer(a, b, out=None):
out.setfield(np.outer(a,b), out.dtype)
return out
+# Note that this explicitly isn't done with @broadcast_define. Instead I
+# implement the internals with core numpy routines. The advantage is that these
+# are some of very few numpy functions that support broadcasting, and they do so
+# on the C level, so their broadcasting loop is FAST. Much more so than my
+# current @broadcast_define loop
+def norm2(a, **kwargs):
+ r'''Broadcast-aware 2-norm. norm2(x) is identical to inner(x,x)
+
+ Synopsis:
+
+ >>> import numpy as np
+ >>> import numpysane as nps
+
+ >>> a = np.arange(3)
+ >>> a
+ array([0, 1, 2])
+
+ >>> nps.norm2(a)
+ 5
+
+ This is a convenience function to compute a 2-norm
+
+ '''
+ return inner(a,a, **kwargs)
+
# Could be implemented with a simple loop around np.dot():
#
# @broadcast_define( (('n', 'm'), ('m', 'l')), prototype_output=('n','l'), out_kwarg='out' )
diff -pruN 0.15-1/README.org 0.16-1/README.org
--- 0.15-1/README.org 2018-07-11 22:48:43.000000000 +0000
+++ 0.16-1/README.org 2018-10-08 19:17:30.000000000 +0000
@@ -8,7 +8,7 @@ numpysane: more-reasonable core function
>>> a = np.arange(6).reshape(2,3)
>>> b = a + 100
->>> row = a[0,:]
+>>> row = a[0,:] + 1000
>>> a
array([[0, 1, 2],
@@ -60,10 +60,10 @@ have used PDL in the past.
Instead of writing a new module (this module), it would be really nice to simply
patch numpy to give everybody the more reasonable behavior. I'd be very happy to
do that, but the issues lie with some very core functionality, and any changes
-in behavior would likely break existing code. Any comments in how to achieve
-better behaviors in a less forky manner as welcome.
+in behavior would break existing code. Any comments on how to achieve better
+behaviors in a less forky manner are welcome.
-Finally, if the system DOES make sense in some way that I'm simply not
+Finally, if the existing system DOES make sense in some way that I'm simply not
understanding, I'm happy to listen. I have no intention to disparage anyone or
anything; I just want a more usable system for numerical computations.
@@ -88,8 +88,8 @@ identically-sized vectors (1-dimensional
arrays of shape (2,3,4), compute the 6 inner products of length-4 each, and
report the output in an array of shape (2,3). Numpy puts the most-significant
dimension at the end, which is why this isn't 12 inner products of length-2
-each. This is a semi-arbitrary design choice, which could have been made
-differently: PDL puts the most-significant dimension at the front, for instance.
+each. This is an arbitrary design choice, which could have been made
+differently: PDL puts the most-significant dimension at the front.
The user doesn't choose whether to use broadcasting or not: some functions
support it, and some do not. In PDL, broadcasting (called "threading" in that
@@ -102,11 +102,12 @@ By contrast, in numpy very few functions
the documentation is usually silent about the broadcasting status of a function
in question. And on top of THAT, broadcasting rules state that an array of
dimensions (n,m) is functionally identical to one of dimensions
-(1,1,1,....1,n,m). However, many numpy functions have special-case rules to
-create different behaviors for inputs with different numbers of dimensions, and
-this creates unexpected results. The effect of all this is a messy situation
-where the user is often not sure of the exact behavior of the functions they're
-calling, and trial and error is required to make the system do what one wants.
+(1,1,1,....1,n,m). Sadly, numpy does not respect its own broadcasting rules, and
+many functions have special-case logic to create different behaviors for inputs
+with different numbers of dimensions; and this creates unexpected results. The
+effect of all this is a messy situation where the user is often not sure of the
+exact behavior of the functions they're calling, and trial and error is required
+to make the system do what one wants.
*** Solution
This module contains functionality to make any arbitrary function broadcastable.
@@ -547,6 +548,9 @@ Broadcast-aware inner product. Identical
**** outer
Broadcast-aware outer product.
+**** norm2
+Broadcast-aware 2-norm. norm2(x) is identical to inner(x,x)
+
**** matmult
Broadcast-aware matrix multiplication
@@ -1391,6 +1395,25 @@ those arguments must match the input pro
the standard broadcasting rules. Positional arguments past the first 2 and
all the keyword arguments are passed through untouched.
+** norm2()
+Broadcast-aware 2-norm. norm2(x) is identical to inner(x,x)
+
+Synopsis:
+
+#+BEGIN_EXAMPLE
+>>> import numpy as np
+>>> import numpysane as nps
+
+>>> a = np.arange(3)
+>>> a
+array([0, 1, 2])
+
+>>> nps.norm2(a)
+5
+#+END_EXAMPLE
+
+This is a convenience function to compute a 2-norm
+
** matmult2()
Multiplication of two matrices
diff -pruN 0.15-1/test_numpysane.py 0.16-1/test_numpysane.py
--- 0.15-1/test_numpysane.py 2018-07-11 22:48:43.000000000 +0000
+++ 0.16-1/test_numpysane.py 2018-10-08 19:17:30.000000000 +0000
@@ -520,6 +520,16 @@ class TestNumpysane(unittest.TestCase):
self.assertValueShape( None, (0,), nps.glue, arr(0,), arr(0,), axis=-1 )
self.assertValueShape( None, (2,), nps.glue, arr(2,), arr(0,), axis=-1 )
self.assertValueShape( None, (2,), nps.glue, arr(0,), arr(2,), axis=-1 )
+ self.assertValueShape( None, (1,2,), nps.glue, arr(2,), arr(0,), axis=-2 )
+ self.assertValueShape( None, (1,2,), nps.glue, arr(0,), arr(2,), axis=-2 )
+
+ # same as before, but np.array(()) instead of np.arange(0)
+ self.assertValueShape( None, (0,), nps.glue, np.array(()), np.array(()), axis=-1 )
+ self.assertValueShape( None, (2,), nps.glue, arr(2,), np.array(()), axis=-1 )
+ self.assertValueShape( None, (2,), nps.glue, np.array(()),arr(2,), axis=-1 )
+ self.assertValueShape( None, (1,2,), nps.glue, arr(2,), np.array(()), axis=-2 )
+ self.assertValueShape( None, (1,2,), nps.glue, np.array(()), arr(2,), axis=-2 )
+
self.assertValueShape( None, (0,6), nps.glue, arr(0,3), arr(0,3), axis=-1 )
self.assertValueShape( None, (0,3), nps.glue, arr(0,3), arr(0,3), axis=-2 )
self.assertValueShape( None, (2,0,3), nps.glue, arr(0,3), arr(0,3), axis=-3 )