/* Generated by Cython 0.29.19 */

#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
    #error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
    #error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_19"
#define CYTHON_HEX_VERSION 0x001D13F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
  #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
  #ifndef __stdcall
    #define __stdcall
  #endif
  #ifndef __cdecl
    #define __cdecl
  #endif
  #ifndef __fastcall
    #define __fastcall
  #endif
#endif
#ifndef DL_IMPORT
  #define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
  #define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
  #if PY_VERSION_HEX >= 0x02070000
    #define HAVE_LONG_LONG
  #endif
#endif
#ifndef PY_LONG_LONG
  #define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
  #define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
  #define CYTHON_COMPILING_IN_PYPY 1
  #define CYTHON_COMPILING_IN_PYSTON 0
  #define CYTHON_COMPILING_IN_CPYTHON 0
  #undef CYTHON_USE_TYPE_SLOTS
  #define CYTHON_USE_TYPE_SLOTS 0
  #undef CYTHON_USE_PYTYPE_LOOKUP
  #define CYTHON_USE_PYTYPE_LOOKUP 0
  #if PY_VERSION_HEX < 0x03050000
    #undef CYTHON_USE_ASYNC_SLOTS
    #define CYTHON_USE_ASYNC_SLOTS 0
  #elif !defined(CYTHON_USE_ASYNC_SLOTS)
    #define CYTHON_USE_ASYNC_SLOTS 1
  #endif
  #undef CYTHON_USE_PYLIST_INTERNALS
  #define CYTHON_USE_PYLIST_INTERNALS 0
  #undef CYTHON_USE_UNICODE_INTERNALS
  #define CYTHON_USE_UNICODE_INTERNALS 0
  #undef CYTHON_USE_UNICODE_WRITER
  #define CYTHON_USE_UNICODE_WRITER 0
  #undef CYTHON_USE_PYLONG_INTERNALS
  #define CYTHON_USE_PYLONG_INTERNALS 0
  #undef CYTHON_AVOID_BORROWED_REFS
  #define CYTHON_AVOID_BORROWED_REFS 1
  #undef CYTHON_ASSUME_SAFE_MACROS
  #define CYTHON_ASSUME_SAFE_MACROS 0
  #undef CYTHON_UNPACK_METHODS
  #define CYTHON_UNPACK_METHODS 0
  #undef CYTHON_FAST_THREAD_STATE
  #define CYTHON_FAST_THREAD_STATE 0
  #undef CYTHON_FAST_PYCALL
  #define CYTHON_FAST_PYCALL 0
  #undef CYTHON_PEP489_MULTI_PHASE_INIT
  #define CYTHON_PEP489_MULTI_PHASE_INIT 0
  #undef CYTHON_USE_TP_FINALIZE
  #define CYTHON_USE_TP_FINALIZE 0
  #undef CYTHON_USE_DICT_VERSIONS
  #define CYTHON_USE_DICT_VERSIONS 0
  #undef CYTHON_USE_EXC_INFO_STACK
  #define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
  #define CYTHON_COMPILING_IN_PYPY 0
  #define CYTHON_COMPILING_IN_PYSTON 1
  #define CYTHON_COMPILING_IN_CPYTHON 0
  #ifndef CYTHON_USE_TYPE_SLOTS
    #define CYTHON_USE_TYPE_SLOTS 1
  #endif
  #undef CYTHON_USE_PYTYPE_LOOKUP
  #define CYTHON_USE_PYTYPE_LOOKUP 0
  #undef CYTHON_USE_ASYNC_SLOTS
  #define CYTHON_USE_ASYNC_SLOTS 0
  #undef CYTHON_USE_PYLIST_INTERNALS
  #define CYTHON_USE_PYLIST_INTERNALS 0
  #ifndef CYTHON_USE_UNICODE_INTERNALS
    #define CYTHON_USE_UNICODE_INTERNALS 1
  #endif
  #undef CYTHON_USE_UNICODE_WRITER
  #define CYTHON_USE_UNICODE_WRITER 0
  #undef CYTHON_USE_PYLONG_INTERNALS
  #define CYTHON_USE_PYLONG_INTERNALS 0
  #ifndef CYTHON_AVOID_BORROWED_REFS
    #define CYTHON_AVOID_BORROWED_REFS 0
  #endif
  #ifndef CYTHON_ASSUME_SAFE_MACROS
    #define CYTHON_ASSUME_SAFE_MACROS 1
  #endif
  #ifndef CYTHON_UNPACK_METHODS
    #define CYTHON_UNPACK_METHODS 1
  #endif
  #undef CYTHON_FAST_THREAD_STATE
  #define CYTHON_FAST_THREAD_STATE 0
  #undef CYTHON_FAST_PYCALL
  #define CYTHON_FAST_PYCALL 0
  #undef CYTHON_PEP489_MULTI_PHASE_INIT
  #define CYTHON_PEP489_MULTI_PHASE_INIT 0
  #undef CYTHON_USE_TP_FINALIZE
  #define CYTHON_USE_TP_FINALIZE 0
  #undef CYTHON_USE_DICT_VERSIONS
  #define CYTHON_USE_DICT_VERSIONS 0
  #undef CYTHON_USE_EXC_INFO_STACK
  #define CYTHON_USE_EXC_INFO_STACK 0
#else
  #define CYTHON_COMPILING_IN_PYPY 0
  #define CYTHON_COMPILING_IN_PYSTON 0
  #define CYTHON_COMPILING_IN_CPYTHON 1
  #ifndef CYTHON_USE_TYPE_SLOTS
    #define CYTHON_USE_TYPE_SLOTS 1
  #endif
  #if PY_VERSION_HEX < 0x02070000
    #undef CYTHON_USE_PYTYPE_LOOKUP
    #define CYTHON_USE_PYTYPE_LOOKUP 0
  #elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
    #define CYTHON_USE_PYTYPE_LOOKUP 1
  #endif
  #if PY_MAJOR_VERSION < 3
    #undef CYTHON_USE_ASYNC_SLOTS
    #define CYTHON_USE_ASYNC_SLOTS 0
  #elif !defined(CYTHON_USE_ASYNC_SLOTS)
    #define CYTHON_USE_ASYNC_SLOTS 1
  #endif
  #if PY_VERSION_HEX < 0x02070000
    #undef CYTHON_USE_PYLONG_INTERNALS
    #define CYTHON_USE_PYLONG_INTERNALS 0
  #elif !defined(CYTHON_USE_PYLONG_INTERNALS)
    #define CYTHON_USE_PYLONG_INTERNALS 1
  #endif
  #ifndef CYTHON_USE_PYLIST_INTERNALS
    #define CYTHON_USE_PYLIST_INTERNALS 1
  #endif
  #ifndef CYTHON_USE_UNICODE_INTERNALS
    #define CYTHON_USE_UNICODE_INTERNALS 1
  #endif
  #if PY_VERSION_HEX < 0x030300F0
    #undef CYTHON_USE_UNICODE_WRITER
    #define CYTHON_USE_UNICODE_WRITER 0
  #elif !defined(CYTHON_USE_UNICODE_WRITER)
    #define CYTHON_USE_UNICODE_WRITER 1
  #endif
  #ifndef CYTHON_AVOID_BORROWED_REFS
    #define CYTHON_AVOID_BORROWED_REFS 0
  #endif
  #ifndef CYTHON_ASSUME_SAFE_MACROS
    #define CYTHON_ASSUME_SAFE_MACROS 1
  #endif
  #ifndef CYTHON_UNPACK_METHODS
    #define CYTHON_UNPACK_METHODS 1
  #endif
  #ifndef CYTHON_FAST_THREAD_STATE
    #define CYTHON_FAST_THREAD_STATE 1
  #endif
  #ifndef CYTHON_FAST_PYCALL
    #define CYTHON_FAST_PYCALL 1
  #endif
  #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
    #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
  #endif
  #ifndef CYTHON_USE_TP_FINALIZE
    #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
  #endif
  #ifndef CYTHON_USE_DICT_VERSIONS
    #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
  #endif
  #ifndef CYTHON_USE_EXC_INFO_STACK
    #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
  #endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL  (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
  #include "longintrepr.h"
  #undef SHIFT
  #undef BASE
  #undef MASK
  #ifdef SIZEOF_VOID_P
    enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
  #endif
#endif
#ifndef __has_attribute
  #define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
  #define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
  #if defined(__GNUC__)
    #define CYTHON_RESTRICT __restrict__
  #elif defined(_MSC_VER) && _MSC_VER >= 1400
    #define CYTHON_RESTRICT __restrict
  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    #define CYTHON_RESTRICT restrict
  #else
    #define CYTHON_RESTRICT
  #endif
#endif
#ifndef CYTHON_UNUSED
  #if defined(__GNUC__)
    #if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
      #define CYTHON_UNUSED __attribute__ ((__unused__))
    #else
      #define CYTHON_UNUSED
    #endif
  #elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
    #define CYTHON_UNUSED __attribute__ ((__unused__))
  #else
    #define CYTHON_UNUSED
  #endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
  #if defined(__cplusplus)
    template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
  #else
    #define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
  #endif
#endif
#ifndef CYTHON_NCP_UNUSED
  #if CYTHON_COMPILING_IN_CPYTHON
    #define CYTHON_NCP_UNUSED
  #else
    #define CYTHON_NCP_UNUSED CYTHON_UNUSED
  #endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
    #ifndef _MSC_STDINT_H_
        #if _MSC_VER < 1300
           typedef unsigned char     uint8_t;
           typedef unsigned int      uint32_t;
        #else
           typedef unsigned __int8   uint8_t;
           typedef unsigned __int32  uint32_t;
        #endif
    #endif
#else
   #include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
  #if defined(__cplusplus) && __cplusplus >= 201103L
    #if __has_cpp_attribute(fallthrough)
      #define CYTHON_FALLTHROUGH [[fallthrough]]
    #elif __has_cpp_attribute(clang::fallthrough)
      #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
    #elif __has_cpp_attribute(gnu::fallthrough)
      #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
    #endif
  #endif
  #ifndef CYTHON_FALLTHROUGH
    #if __has_attribute(fallthrough)
      #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
    #else
      #define CYTHON_FALLTHROUGH
    #endif
  #endif
  #if defined(__clang__ ) && defined(__apple_build_version__)
    #if __apple_build_version__ < 7000000
      #undef CYTHON_FALLTHROUGH
      #define CYTHON_FALLTHROUGH
    #endif
  #endif
#endif
#ifndef __cplusplus
  #error "Cython files generated with the C++ option must be compiled with a C++ compiler."
#endif
#ifndef CYTHON_INLINE
  #if defined(__clang__)
    #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
  #else
    #define CYTHON_INLINE inline
  #endif
#endif
template<typename T>
void __Pyx_call_destructor(T& x) {
    x.~T();
}
template<typename T>
class __Pyx_FakeReference {
  public:
    __Pyx_FakeReference() : ptr(NULL) { }
    __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }
    T *operator->() { return ptr; }
    T *operator&() { return ptr; }
    operator T&() { return *ptr; }
    template<typename U> bool operator ==(U other) { return *ptr == other; }
    template<typename U> bool operator !=(U other) { return *ptr != other; }
  private:
    T *ptr;
};
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
  #define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
  #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
          PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
  #define __Pyx_DefaultClassType PyClass_Type
#else
  #define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
          PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
          PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
  #define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
  #define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
  #define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
  #define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
  #define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
  #define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
  #ifndef METH_FASTCALL
     #define METH_FASTCALL 0x80
  #endif
  typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
  typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
                                                          Py_ssize_t nargs, PyObject *kwnames);
#else
  #define __Pyx_PyCFunctionFast _PyCFunctionFast
  #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
    ((PyCFunction_Check(func) && (METH_FASTCALL ==
(PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : (Py_INCREF(func), func))
#else
  #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
  #if PY_VERSION_HEX >= 0x030500B1
    #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
    #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
  #else
    #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
  #endif
#else
  #define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
    typedef struct {
        unaryfunc am_await;
        unaryfunc am_aiter;
        unaryfunc am_anext;
    } __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
  #define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
  float value;
  memset(&value, 0xFF, sizeof(value));
  return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
    { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
    { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
  #ifdef __cplusplus
    #define __PYX_EXTERN_C extern "C"
  #else
    #define __PYX_EXTERN_C extern
  #endif
#endif

#define __PYX_HAVE__thinc__neural__ops
#define __PYX_HAVE_API__thinc__neural__ops
/* Early includes */
#include
#include
#include
#include
#include
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include "math.h"
#include "pythread.h"
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */

#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif

typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;

#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
    (sizeof(type) < sizeof(Py_ssize_t)) ||\
    (sizeof(type) > sizeof(Py_ssize_t) &&\
          likely(v < (type)PY_SSIZE_T_MAX ||\
                 v == (type)PY_SSIZE_T_MAX) &&\
          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
                                v == (type)PY_SSIZE_T_MIN))) ||\
    (sizeof(type) == sizeof(Py_ssize_t) &&\
          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
                               v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
    return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
    #include <cstdlib>
    #define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
    #define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
    #define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
    #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    #define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
    #define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
    #define
__Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= 
__FILE__;
static const char *__pyx_filename;

/* Header.proto */
#if !defined(CYTHON_CCOMPLEX)
  #if defined(__cplusplus)
    #define CYTHON_CCOMPLEX 1
  #elif defined(_Complex_I)
    #define CYTHON_CCOMPLEX 1
  #else
    #define CYTHON_CCOMPLEX 0
  #endif
#endif
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    #include <complex>
  #else
    #include <complex.h>
  #endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
  #undef _Complex_I
  #define _Complex_I 1.0fj
#endif

static const char *__pyx_f[] = {
  "ops.pyx",
  "__init__.pxd",
  "linalg.pxd",
  "stringsource",
  "cymem.pxd",
  "maps.pxd",
  "type.pxd",
};

/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
  struct __pyx_memoryview_obj *memview;
  char *data;
  Py_ssize_t shape[8];
  Py_ssize_t strides[8];
  Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m)  (m.shape[0])

/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
                    !defined(__i386__)
    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
    #include <intrin.h>
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type LONG
    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #pragma message ("Using MSVC atomics")
    #endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using Intel atomics"
    #endif
#else
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
    #define __pyx_add_acquisition_count(memview)\
             __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
    #define __pyx_add_acquisition_count(memview)\
             __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif

/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
  const char* name;
  struct __Pyx_StructField_* fields;
  size_t size;
  size_t arraysize[8];
  int ndim;
  char typegroup;
  char is_unsigned;
  int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
typedef struct {
  __Pyx_StructField* field;
  size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
  __Pyx_StructField root;
  __Pyx_BufFmt_StackElem* head;
  size_t fmt_offset;
  size_t new_count, enc_count;
  size_t struct_alignment;
  int is_complex;
  char enc_type;
  char new_packmode;
  char enc_packmode;
  char is_valid_array;
} __Pyx_BufFmt_Context;

/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
  #define
__PYX_FORCE_INIT_THREADS 0 #endif /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* "preshed/maps.pxd":5 * * * ctypedef uint64_t key_t # <<<<<<<<<<<<<< * * */ typedef uint64_t __pyx_t_7preshed_4maps_key_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775 * # in Cython to enable them only on the right systems. * * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":777 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":785 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":789 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":790 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* 
"../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":799 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":808 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":810 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":811 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":812 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "typedefs.pxd":7 * * * ctypedef float weight_t # <<<<<<<<<<<<<< * ctypedef uint64_t atom_t * ctypedef uint64_t feat_t */ typedef float __pyx_t_5thinc_8typedefs_weight_t; /* "typedefs.pxd":8 * * ctypedef float 
weight_t * ctypedef uint64_t atom_t # <<<<<<<<<<<<<< * ctypedef uint64_t feat_t * ctypedef uint64_t hash_t */ typedef uint64_t __pyx_t_5thinc_8typedefs_atom_t; /* "typedefs.pxd":9 * ctypedef float weight_t * ctypedef uint64_t atom_t * ctypedef uint64_t feat_t # <<<<<<<<<<<<<< * ctypedef uint64_t hash_t * ctypedef int32_t class_t */ typedef uint64_t __pyx_t_5thinc_8typedefs_feat_t; /* "typedefs.pxd":10 * ctypedef uint64_t atom_t * ctypedef uint64_t feat_t * ctypedef uint64_t hash_t # <<<<<<<<<<<<<< * ctypedef int32_t class_t * ctypedef uint32_t count_t */ typedef uint64_t __pyx_t_5thinc_8typedefs_hash_t; /* "typedefs.pxd":11 * ctypedef uint64_t feat_t * ctypedef uint64_t hash_t * ctypedef int32_t class_t # <<<<<<<<<<<<<< * ctypedef uint32_t count_t * ctypedef uint32_t time_t */ typedef int32_t __pyx_t_5thinc_8typedefs_class_t; /* "typedefs.pxd":12 * ctypedef uint64_t hash_t * ctypedef int32_t class_t * ctypedef uint32_t count_t # <<<<<<<<<<<<<< * ctypedef uint32_t time_t * ctypedef int32_t len_t */ typedef uint32_t __pyx_t_5thinc_8typedefs_count_t; /* "typedefs.pxd":13 * ctypedef int32_t class_t * ctypedef uint32_t count_t * ctypedef uint32_t time_t # <<<<<<<<<<<<<< * ctypedef int32_t len_t * ctypedef int32_t idx_t */ typedef uint32_t __pyx_t_5thinc_8typedefs_time_t; /* "typedefs.pxd":14 * ctypedef uint32_t count_t * ctypedef uint32_t time_t * ctypedef int32_t len_t # <<<<<<<<<<<<<< * ctypedef int32_t idx_t * */ typedef int32_t __pyx_t_5thinc_8typedefs_len_t; /* "typedefs.pxd":15 * ctypedef uint32_t time_t * ctypedef int32_t len_t * ctypedef int32_t idx_t # <<<<<<<<<<<<<< * * */ typedef int32_t __pyx_t_5thinc_8typedefs_idx_t; /* "blis/cy.pxd":58 * * * ctypedef int64_t dim_t # <<<<<<<<<<<<<< * ctypedef int64_t inc_t * ctypedef int64_t doff_t */ typedef int64_t __pyx_t_4blis_2cy_dim_t; /* "blis/cy.pxd":59 * * ctypedef int64_t dim_t * ctypedef int64_t inc_t # <<<<<<<<<<<<<< * ctypedef int64_t doff_t * */ typedef int64_t __pyx_t_4blis_2cy_inc_t; /* "blis/cy.pxd":60 * ctypedef int64_t dim_t * ctypedef int64_t inc_t * ctypedef int64_t doff_t # <<<<<<<<<<<<<< * * */ typedef int64_t __pyx_t_4blis_2cy_doff_t; /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /*--- Type declarations ---*/ struct __pyx_obj_5cymem_5cymem_PyMalloc; struct __pyx_obj_5cymem_5cymem_PyFree; struct __pyx_obj_5cymem_5cymem_Pool; struct __pyx_obj_5cymem_5cymem_Address; struct __pyx_obj_7preshed_4maps_PreshMap; struct __pyx_obj_7preshed_4maps_PreshMapArray; struct __pyx_obj_5thinc_6linalg_Matrix; struct __pyx_obj_5thinc_6linalg_Vec; struct __pyx_obj_5thinc_6linalg_VecVec; struct __pyx_obj_5thinc_6linalg_Mat; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop; 
struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences; struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "cymem/cymem.pxd":1 * ctypedef void* (*malloc_t)(size_t n) # <<<<<<<<<<<<<< * ctypedef void (*free_t)(void *p) * */ typedef void *(*__pyx_t_5cymem_5cymem_malloc_t)(size_t); /* "cymem/cymem.pxd":2 * ctypedef void* (*malloc_t)(size_t n) * ctypedef void (*free_t)(void *p) # <<<<<<<<<<<<<< * * cdef class PyMalloc: */ typedef void (*__pyx_t_5cymem_5cymem_free_t)(void *); struct __pyx_t_7preshed_4maps_Cell; struct __pyx_t_7preshed_4maps_Result; struct __pyx_t_7preshed_4maps_MapStruct; /* "preshed/maps.pxd":8 * * * cdef struct Cell: # <<<<<<<<<<<<<< * key_t key * void* value */ struct __pyx_t_7preshed_4maps_Cell { __pyx_t_7preshed_4maps_key_t key; void *value; }; /* "preshed/maps.pxd":13 * * * cdef struct Result: # <<<<<<<<<<<<<< * int found * void* value */ struct __pyx_t_7preshed_4maps_Result { int found; void *value; }; /* "preshed/maps.pxd":18 * * * cdef struct MapStruct: # <<<<<<<<<<<<<< * Cell* cells * void* value_for_empty_key */ struct __pyx_t_7preshed_4maps_MapStruct { struct __pyx_t_7preshed_4maps_Cell *cells; void *value_for_empty_key; void *value_for_del_key; __pyx_t_7preshed_4maps_key_t length; __pyx_t_7preshed_4maps_key_t filled; int is_empty_key_set; int is_del_key_set; }; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":816 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* "blis/cy.pxd":66 * # the header into our header. * # We get some piece of mind from checking the values on init. 
* cpdef enum trans_t: # <<<<<<<<<<<<<< * NO_TRANSPOSE = 0 * TRANSPOSE = 8 */ enum __pyx_t_4blis_2cy_trans_t { __pyx_e_4blis_2cy_NO_TRANSPOSE = 0, __pyx_e_4blis_2cy_TRANSPOSE = 8, __pyx_e_4blis_2cy_CONJ_NO_TRANSPOSE = 16, __pyx_e_4blis_2cy_CONJ_TRANSPOSE = 24 }; /* "blis/cy.pxd":73 * * * cpdef enum conj_t: # <<<<<<<<<<<<<< * NO_CONJUGATE = 0 * CONJUGATE = 16 */ enum __pyx_t_4blis_2cy_conj_t { __pyx_e_4blis_2cy_NO_CONJUGATE = 0, __pyx_e_4blis_2cy_CONJUGATE = 16 }; /* "blis/cy.pxd":78 * * * cpdef enum side_t: # <<<<<<<<<<<<<< * LEFT = 0 * RIGHT = 1 */ enum __pyx_t_4blis_2cy_side_t { __pyx_e_4blis_2cy_LEFT = 0, __pyx_e_4blis_2cy_RIGHT = 1 }; /* "blis/cy.pxd":83 * * * cpdef enum uplo_t: # <<<<<<<<<<<<<< * LOWER = 192 * UPPER = 96 */ enum __pyx_t_4blis_2cy_uplo_t { __pyx_e_4blis_2cy_LOWER = 0xC0, __pyx_e_4blis_2cy_UPPER = 96, __pyx_e_4blis_2cy_DENSE = 0xE0 }; /* "blis/cy.pxd":89 * * * cpdef enum diag_t: # <<<<<<<<<<<<<< * NONUNIT_DIAG = 0 * UNIT_DIAG = 256 */ enum __pyx_t_4blis_2cy_diag_t { __pyx_e_4blis_2cy_NONUNIT_DIAG = 0, __pyx_e_4blis_2cy_UNIT_DIAG = 0x100 }; /* "blis/cy.pxd":6 * * * ctypedef float[::1] float1d_t # <<<<<<<<<<<<<< * ctypedef double[::1] double1d_t * ctypedef float[:, ::1] float2d_t */ typedef __Pyx_memviewslice __pyx_t_4blis_2cy_float1d_t; /* "blis/cy.pxd":7 * * ctypedef float[::1] float1d_t * ctypedef double[::1] double1d_t # <<<<<<<<<<<<<< * ctypedef float[:, ::1] float2d_t * ctypedef double[:, ::1] double2d_t */ typedef __Pyx_memviewslice __pyx_t_4blis_2cy_double1d_t; /* "blis/cy.pxd":8 * ctypedef float[::1] float1d_t * ctypedef double[::1] double1d_t * ctypedef float[:, ::1] float2d_t # <<<<<<<<<<<<<< * ctypedef double[:, ::1] double2d_t * ctypedef float* floats_t */ typedef __Pyx_memviewslice __pyx_t_4blis_2cy_float2d_t; /* "blis/cy.pxd":9 * ctypedef double[::1] double1d_t * ctypedef float[:, ::1] float2d_t * ctypedef double[:, ::1] double2d_t # <<<<<<<<<<<<<< * ctypedef float* floats_t * ctypedef double* doubles_t */ typedef __Pyx_memviewslice __pyx_t_4blis_2cy_double2d_t; /* "blis/cy.pxd":10 * ctypedef float[:, ::1] float2d_t * ctypedef double[:, ::1] double2d_t * ctypedef float* floats_t # <<<<<<<<<<<<<< * ctypedef double* doubles_t * ctypedef const float[::1] const_float1d_t */ typedef float *__pyx_t_4blis_2cy_floats_t; /* "blis/cy.pxd":11 * ctypedef double[:, ::1] double2d_t * ctypedef float* floats_t * ctypedef double* doubles_t # <<<<<<<<<<<<<< * ctypedef const float[::1] const_float1d_t * ctypedef const double[::1] const_double1d_t */ typedef double *__pyx_t_4blis_2cy_doubles_t; /* "blis/cy.pxd":12 * ctypedef float* floats_t * ctypedef double* doubles_t * ctypedef const float[::1] const_float1d_t # <<<<<<<<<<<<<< * ctypedef const double[::1] const_double1d_t * ctypedef const float[:, ::1] const_float2d_t */ typedef __Pyx_memviewslice __pyx_t_4blis_2cy_const_float1d_t; /* "blis/cy.pxd":13 * ctypedef double* doubles_t * ctypedef const float[::1] const_float1d_t * ctypedef const double[::1] const_double1d_t # <<<<<<<<<<<<<< * ctypedef const float[:, ::1] const_float2d_t * ctypedef const double[:, ::1] const_double2d_t */ typedef __Pyx_memviewslice __pyx_t_4blis_2cy_const_double1d_t; /* "blis/cy.pxd":14 * ctypedef const float[::1] const_float1d_t * ctypedef const double[::1] const_double1d_t * ctypedef const float[:, ::1] const_float2d_t # <<<<<<<<<<<<<< * ctypedef const double[:, ::1] const_double2d_t * ctypedef const float* const_floats_t */ typedef __Pyx_memviewslice __pyx_t_4blis_2cy_const_float2d_t; /* "blis/cy.pxd":15 * ctypedef const double[::1] 
const_double1d_t * ctypedef const float[:, ::1] const_float2d_t * ctypedef const double[:, ::1] const_double2d_t # <<<<<<<<<<<<<< * ctypedef const float* const_floats_t * ctypedef const double* const_doubles_t */ typedef __Pyx_memviewslice __pyx_t_4blis_2cy_const_double2d_t; /* "blis/cy.pxd":16 * ctypedef const float[:, ::1] const_float2d_t * ctypedef const double[:, ::1] const_double2d_t * ctypedef const float* const_floats_t # <<<<<<<<<<<<<< * ctypedef const double* const_doubles_t * */ typedef float const *__pyx_t_4blis_2cy_const_floats_t; /* "blis/cy.pxd":17 * ctypedef const double[:, ::1] const_double2d_t * ctypedef const float* const_floats_t * ctypedef const double* const_doubles_t # <<<<<<<<<<<<<< * * */ typedef double const *__pyx_t_4blis_2cy_const_doubles_t; /* "cymem/cymem.pxd":4 * ctypedef void (*free_t)(void *p) * * cdef class PyMalloc: # <<<<<<<<<<<<<< * cdef malloc_t malloc * cdef void _set(self, malloc_t malloc) */ struct __pyx_obj_5cymem_5cymem_PyMalloc { PyObject_HEAD struct __pyx_vtabstruct_5cymem_5cymem_PyMalloc *__pyx_vtab; __pyx_t_5cymem_5cymem_malloc_t malloc; }; /* "cymem/cymem.pxd":10 * cdef PyMalloc WrapMalloc(malloc_t malloc) * * cdef class PyFree: # <<<<<<<<<<<<<< * cdef free_t free * cdef void _set(self, free_t free) */ struct __pyx_obj_5cymem_5cymem_PyFree { PyObject_HEAD struct __pyx_vtabstruct_5cymem_5cymem_PyFree *__pyx_vtab; __pyx_t_5cymem_5cymem_free_t free; }; /* "cymem/cymem.pxd":16 * cdef PyFree WrapFree(free_t free) * * cdef class Pool: # <<<<<<<<<<<<<< * cdef readonly size_t size * cdef readonly dict addresses */ struct __pyx_obj_5cymem_5cymem_Pool { PyObject_HEAD struct __pyx_vtabstruct_5cymem_5cymem_Pool *__pyx_vtab; size_t size; PyObject *addresses; PyObject *refs; struct __pyx_obj_5cymem_5cymem_PyMalloc *pymalloc; struct __pyx_obj_5cymem_5cymem_PyFree *pyfree; }; /* "cymem/cymem.pxd":28 * * * cdef class Address: # <<<<<<<<<<<<<< * cdef void* ptr * cdef readonly PyMalloc pymalloc */ struct __pyx_obj_5cymem_5cymem_Address { PyObject_HEAD void *ptr; struct __pyx_obj_5cymem_5cymem_PyMalloc *pymalloc; struct __pyx_obj_5cymem_5cymem_PyFree *pyfree; }; /* "preshed/maps.pxd":45 * * * cdef class PreshMap: # <<<<<<<<<<<<<< * cdef MapStruct* c_map * cdef Pool mem */ struct __pyx_obj_7preshed_4maps_PreshMap { PyObject_HEAD struct __pyx_vtabstruct_7preshed_4maps_PreshMap *__pyx_vtab; struct __pyx_t_7preshed_4maps_MapStruct *c_map; struct __pyx_obj_5cymem_5cymem_Pool *mem; }; /* "preshed/maps.pxd":53 * * * cdef class PreshMapArray: # <<<<<<<<<<<<<< * cdef Pool mem * cdef MapStruct* maps */ struct __pyx_obj_7preshed_4maps_PreshMapArray { PyObject_HEAD struct __pyx_vtabstruct_7preshed_4maps_PreshMapArray *__pyx_vtab; struct __pyx_obj_5cymem_5cymem_Pool *mem; struct __pyx_t_7preshed_4maps_MapStruct *maps; size_t length; }; /* "linalg.pxd":22 * * * cdef class Matrix: # <<<<<<<<<<<<<< * cdef readonly Pool mem * cdef weight_t* data */ struct __pyx_obj_5thinc_6linalg_Matrix { PyObject_HEAD struct __pyx_obj_5cymem_5cymem_Pool *mem; __pyx_t_5thinc_8typedefs_weight_t *data; int32_t nr_row; int32_t nr_col; }; /* "linalg.pxd":29 * * * cdef class Vec: # <<<<<<<<<<<<<< * @staticmethod * cdef inline int arg_max(const weight_t* scores, const int n_classes) nogil: */ struct __pyx_obj_5thinc_6linalg_Vec { PyObject_HEAD struct __pyx_vtabstruct_5thinc_6linalg_Vec *__pyx_vtab; }; /* "linalg.pxd":158 * * * cdef class VecVec: # <<<<<<<<<<<<<< * @staticmethod * cdef inline void add(weight_t* output, */ struct __pyx_obj_5thinc_6linalg_VecVec { PyObject_HEAD struct 
__pyx_vtabstruct_5thinc_6linalg_VecVec *__pyx_vtab; }; /* "linalg.pxd":249 * * * cdef class Mat: # <<<<<<<<<<<<<< * @staticmethod * cdef inline void mean_row(weight_t* Ex, */ struct __pyx_obj_5thinc_6linalg_Mat { PyObject_HEAD struct __pyx_vtabstruct_5thinc_6linalg_Mat *__pyx_vtab; }; /* "thinc/neural/ops.pyx":95 * return dX * * def dropout_sequences(self, X, dropout, inplace=False): # <<<<<<<<<<<<<< * if dropout is None or dropout <= 0.0: * return X, lambda func: func */ struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences { PyObject_HEAD PyObject *__pyx_v_inplace; PyObject *__pyx_v_masks; }; /* "thinc/neural/ops.pyx":99 * return X, lambda func: func * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] * def wrap_backprop(backprop): # <<<<<<<<<<<<<< * def finish_update(gradient, *args, **kwargs): * masked = [] */ struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop { PyObject_HEAD struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *__pyx_outer_scope; PyObject *__pyx_v_backprop; }; /* "thinc/neural/ops.pyx":120 * return masked, wrap_backprop * * def dropout(self, x, dropout, inplace=False): # <<<<<<<<<<<<<< * if dropout is None or dropout <= 0.0: * return x, lambda func: func */ struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout { PyObject_HEAD PyObject *__pyx_v_mask; }; /* "thinc/neural/ops.pyx":126 * if mask is None: * return x, lambda func: func * def wrap_backprop(backprop): # <<<<<<<<<<<<<< * def finish_update(gradient, *args, **kwargs): * return backprop(gradient * mask, *args, **kwargs) */ struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop { PyObject_HEAD struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *__pyx_outer_scope; PyObject *__pyx_v_backprop; }; /* "thinc/neural/ops.pyx":170 * return unflat * * def square_sequences(self, seqs): # <<<<<<<<<<<<<< * '''Sort a batch of sequence by decreasing length, pad, and transpose * so that the outer dimension is the timestep. 
Return the padded batch, */ struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences { PyObject_HEAD PyObject *__pyx_v_extra_dims; PyObject *__pyx_v_indices; PyObject *__pyx_v_lengths; PyObject *__pyx_v_self; }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "cymem/cymem.pxd":4 * ctypedef void (*free_t)(void *p) * * cdef class PyMalloc: # <<<<<<<<<<<<<< * cdef malloc_t malloc * cdef void _set(self, malloc_t malloc) */ struct __pyx_vtabstruct_5cymem_5cymem_PyMalloc { void (*_set)(struct __pyx_obj_5cymem_5cymem_PyMalloc *, __pyx_t_5cymem_5cymem_malloc_t); }; static struct __pyx_vtabstruct_5cymem_5cymem_PyMalloc *__pyx_vtabptr_5cymem_5cymem_PyMalloc; /* "cymem/cymem.pxd":10 * cdef PyMalloc WrapMalloc(malloc_t malloc) * * cdef class PyFree: # <<<<<<<<<<<<<< * cdef free_t free * cdef void _set(self, free_t free) */ struct __pyx_vtabstruct_5cymem_5cymem_PyFree { void (*_set)(struct __pyx_obj_5cymem_5cymem_PyFree *, __pyx_t_5cymem_5cymem_free_t); }; static struct __pyx_vtabstruct_5cymem_5cymem_PyFree *__pyx_vtabptr_5cymem_5cymem_PyFree; /* "cymem/cymem.pxd":16 * cdef PyFree WrapFree(free_t free) * * cdef class Pool: # <<<<<<<<<<<<<< * cdef readonly size_t size * cdef readonly dict addresses */ struct __pyx_vtabstruct_5cymem_5cymem_Pool { void *(*alloc)(struct __pyx_obj_5cymem_5cymem_Pool *, size_t, size_t); void (*free)(struct __pyx_obj_5cymem_5cymem_Pool *, void *); void *(*realloc)(struct __pyx_obj_5cymem_5cymem_Pool *, void *, size_t); }; static struct __pyx_vtabstruct_5cymem_5cymem_Pool *__pyx_vtabptr_5cymem_5cymem_Pool; /* "preshed/maps.pxd":45 * * * cdef class PreshMap: # <<<<<<<<<<<<<< * cdef MapStruct* c_map * cdef Pool mem */ struct __pyx_vtabstruct_7preshed_4maps_PreshMap { void *(*get)(struct __pyx_obj_7preshed_4maps_PreshMap *, __pyx_t_7preshed_4maps_key_t); void (*set)(struct __pyx_obj_7preshed_4maps_PreshMap *, __pyx_t_7preshed_4maps_key_t, void *); }; static struct __pyx_vtabstruct_7preshed_4maps_PreshMap *__pyx_vtabptr_7preshed_4maps_PreshMap; /* "preshed/maps.pxd":53 * * * cdef class PreshMapArray: # <<<<<<<<<<<<<< * cdef Pool mem * cdef 
MapStruct* maps */ struct __pyx_vtabstruct_7preshed_4maps_PreshMapArray { void *(*get)(struct __pyx_obj_7preshed_4maps_PreshMapArray *, size_t, __pyx_t_7preshed_4maps_key_t); void (*set)(struct __pyx_obj_7preshed_4maps_PreshMapArray *, size_t, __pyx_t_7preshed_4maps_key_t, void *); }; static struct __pyx_vtabstruct_7preshed_4maps_PreshMapArray *__pyx_vtabptr_7preshed_4maps_PreshMapArray; /* "linalg.pxd":29 * * * cdef class Vec: # <<<<<<<<<<<<<< * @staticmethod * cdef inline int arg_max(const weight_t* scores, const int n_classes) nogil: */ struct __pyx_vtabstruct_5thinc_6linalg_Vec { int (*arg_max)(__pyx_t_5thinc_8typedefs_weight_t const *, int const ); __pyx_t_5thinc_8typedefs_weight_t (*max)(__pyx_t_5thinc_8typedefs_weight_t const *, int32_t); __pyx_t_5thinc_8typedefs_weight_t (*sum)(__pyx_t_5thinc_8typedefs_weight_t const *, int32_t); __pyx_t_5thinc_8typedefs_weight_t (*norm)(__pyx_t_5thinc_8typedefs_weight_t const *, int32_t); void (*add)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); void (*add_i)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); void (*mul)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); void (*mul_i)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); void (*pow)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); void (*pow_i)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const , int32_t); void (*div)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); void (*div_i)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const , int32_t); void (*exp)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, int32_t); void (*exp_i)(__pyx_t_5thinc_8typedefs_weight_t *, int32_t); void (*reciprocal_i)(__pyx_t_5thinc_8typedefs_weight_t *, int32_t); __pyx_t_5thinc_8typedefs_weight_t (*mean)(__pyx_t_5thinc_8typedefs_weight_t const *, int32_t); __pyx_t_5thinc_8typedefs_weight_t (*variance)(__pyx_t_5thinc_8typedefs_weight_t const *, int32_t); }; static struct __pyx_vtabstruct_5thinc_6linalg_Vec *__pyx_vtabptr_5thinc_6linalg_Vec; static CYTHON_INLINE int __pyx_f_5thinc_6linalg_3Vec_arg_max(__pyx_t_5thinc_8typedefs_weight_t const *, int const ); static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_max(__pyx_t_5thinc_8typedefs_weight_t const *, int32_t); static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_sum(__pyx_t_5thinc_8typedefs_weight_t const *, int32_t); static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_norm(__pyx_t_5thinc_8typedefs_weight_t const *, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_add(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_add_i(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_mul(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_mul_i(__pyx_t_5thinc_8typedefs_weight_t *, 
__pyx_t_5thinc_8typedefs_weight_t, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_pow(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_pow_i(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const , int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_div(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_div_i(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const , int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_exp(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_exp_i(__pyx_t_5thinc_8typedefs_weight_t *, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_reciprocal_i(__pyx_t_5thinc_8typedefs_weight_t *, int32_t); static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_mean(__pyx_t_5thinc_8typedefs_weight_t const *, int32_t); static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_variance(__pyx_t_5thinc_8typedefs_weight_t const *, int32_t); /* "linalg.pxd":158 * * * cdef class VecVec: # <<<<<<<<<<<<<< * @staticmethod * cdef inline void add(weight_t* output, */ struct __pyx_vtabstruct_5thinc_6linalg_VecVec { void (*add)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); void (*add_i)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); void (*batch_add_i)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t, int32_t); void (*add_pow)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); void (*add_pow_i)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); void (*mul)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, int32_t); void (*mul_i)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, int32_t); __pyx_t_5thinc_8typedefs_weight_t (*dot)(__pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, int32_t); int (*arg_max_if_true)(__pyx_t_5thinc_8typedefs_weight_t const *, int const *, int const ); int (*arg_max_if_zero)(__pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, int const ); }; static struct __pyx_vtabstruct_5thinc_6linalg_VecVec *__pyx_vtabptr_5thinc_6linalg_VecVec; static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_add(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_add_i(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_batch_add_i(__pyx_t_5thinc_8typedefs_weight_t *, 
__pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_add_pow(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_add_pow_i(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_mul(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_mul_i(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, int32_t); static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_6VecVec_dot(__pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, int32_t); static CYTHON_INLINE int __pyx_f_5thinc_6linalg_6VecVec_arg_max_if_true(__pyx_t_5thinc_8typedefs_weight_t const *, int const *, int const ); static CYTHON_INLINE int __pyx_f_5thinc_6linalg_6VecVec_arg_max_if_zero(__pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, int const ); /* "linalg.pxd":249 * * * cdef class Mat: # <<<<<<<<<<<<<< * @staticmethod * cdef inline void mean_row(weight_t* Ex, */ struct __pyx_vtabstruct_5thinc_6linalg_Mat { void (*mean_row)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, int32_t, int32_t); void (*var_row)(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, int32_t, int32_t, __pyx_t_5thinc_8typedefs_weight_t); }; static struct __pyx_vtabstruct_5thinc_6linalg_Mat *__pyx_vtabptr_5thinc_6linalg_Mat; static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Mat_mean_row(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, int32_t, int32_t); static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Mat_var_row(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, int32_t, int32_t, __pyx_t_5thinc_8typedefs_weight_t); /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for 
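/* Illustrative sketch (hypothetical helper, not part of the generated module):
 * the @staticmethod cdef inline members of Vec/VecVec/Mat declared above are
 * emitted as plain static C functions, so calling them involves no
 * Python-level dispatch. */
static __pyx_t_5thinc_8typedefs_weight_t
__pyx_example_mean_product(const __pyx_t_5thinc_8typedefs_weight_t *x,
                           const __pyx_t_5thinc_8typedefs_weight_t *y,
                           int32_t nr)
{
    if (nr <= 0)
        return 0;
    /* VecVec.dot(x, y, nr) / nr, using the prototypes declared above */
    return __pyx_f_5thinc_6linalg_6VecVec_dot(x, y, nr) / (__pyx_t_5thinc_8typedefs_weight_t)nr;
}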
passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* 
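/* Illustrative sketch (hypothetical function, not produced by Cython): the
 * refnanny macros above bracket every generated function body; with
 * CYTHON_REFNANNY defined to 0 they reduce to plain Py_INCREF/Py_DECREF or
 * to nothing at all. */
static PyObject *
__pyx_example_identity(PyObject *obj)
{
    PyObject *result = NULL;
    __Pyx_RefNannyDeclarations
    __Pyx_RefNannySetupContext("example_identity", 0);
    __Pyx_INCREF(obj);  /* logged by the refnanny when it is enabled */
    result = obj;
    __Pyx_RefNannyFinishContext();
    return result;
}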
RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* Profile.proto */ #ifndef CYTHON_PROFILE #if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON #define CYTHON_PROFILE 0 #else #define CYTHON_PROFILE 1 #endif #endif #ifndef CYTHON_TRACE_NOGIL #define CYTHON_TRACE_NOGIL 0 #else #if CYTHON_TRACE_NOGIL && !defined(CYTHON_TRACE) #define CYTHON_TRACE 1 #endif #endif #ifndef CYTHON_TRACE #define CYTHON_TRACE 0 #endif #if CYTHON_TRACE #undef CYTHON_PROFILE_REUSE_FRAME #endif #ifndef CYTHON_PROFILE_REUSE_FRAME #define CYTHON_PROFILE_REUSE_FRAME 0 #endif #if CYTHON_PROFILE || CYTHON_TRACE #include "compile.h" #include "frameobject.h" #include "traceback.h" #if CYTHON_PROFILE_REUSE_FRAME #define CYTHON_FRAME_MODIFIER static #define CYTHON_FRAME_DEL(frame) #else #define CYTHON_FRAME_MODIFIER #define CYTHON_FRAME_DEL(frame) Py_CLEAR(frame) #endif #define __Pyx_TraceDeclarations\ static PyCodeObject *__pyx_frame_code = NULL;\ CYTHON_FRAME_MODIFIER PyFrameObject *__pyx_frame = NULL;\ int __Pyx_use_tracing = 0; #define __Pyx_TraceFrameInit(codeobj)\ if (codeobj) __pyx_frame_code = (PyCodeObject*) codeobj; #ifdef WITH_THREAD #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error)\ if (nogil) {\ if (CYTHON_TRACE_NOGIL) {\ PyThreadState *tstate;\ PyGILState_STATE state = PyGILState_Ensure();\ tstate = __Pyx_PyThreadState_Current;\ if (unlikely(tstate->use_tracing) && !tstate->tracing &&\ (tstate->c_profilefunc || (CYTHON_TRACE && tstate->c_tracefunc))) {\ __Pyx_use_tracing 
= __Pyx_TraceSetupAndCall(&__pyx_frame_code, &__pyx_frame, tstate, funcname, srcfile, firstlineno);\ }\ PyGILState_Release(state);\ if (unlikely(__Pyx_use_tracing < 0)) goto_error;\ }\ } else {\ PyThreadState* tstate = PyThreadState_GET();\ if (unlikely(tstate->use_tracing) && !tstate->tracing &&\ (tstate->c_profilefunc || (CYTHON_TRACE && tstate->c_tracefunc))) {\ __Pyx_use_tracing = __Pyx_TraceSetupAndCall(&__pyx_frame_code, &__pyx_frame, tstate, funcname, srcfile, firstlineno);\ if (unlikely(__Pyx_use_tracing < 0)) goto_error;\ }\ } #else #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error)\ { PyThreadState* tstate = PyThreadState_GET();\ if (unlikely(tstate->use_tracing) && !tstate->tracing &&\ (tstate->c_profilefunc || (CYTHON_TRACE && tstate->c_tracefunc))) {\ __Pyx_use_tracing = __Pyx_TraceSetupAndCall(&__pyx_frame_code, &__pyx_frame, tstate, funcname, srcfile, firstlineno);\ if (unlikely(__Pyx_use_tracing < 0)) goto_error;\ }\ } #endif #define __Pyx_TraceException()\ if (likely(!__Pyx_use_tracing)); else {\ PyThreadState* tstate = __Pyx_PyThreadState_Current;\ if (tstate->use_tracing &&\ (tstate->c_profilefunc || (CYTHON_TRACE && tstate->c_tracefunc))) {\ tstate->tracing++;\ tstate->use_tracing = 0;\ PyObject *exc_info = __Pyx_GetExceptionTuple(tstate);\ if (exc_info) {\ if (CYTHON_TRACE && tstate->c_tracefunc)\ tstate->c_tracefunc(\ tstate->c_traceobj, __pyx_frame, PyTrace_EXCEPTION, exc_info);\ tstate->c_profilefunc(\ tstate->c_profileobj, __pyx_frame, PyTrace_EXCEPTION, exc_info);\ Py_DECREF(exc_info);\ }\ tstate->use_tracing = 1;\ tstate->tracing--;\ }\ } static void __Pyx_call_return_trace_func(PyThreadState *tstate, PyFrameObject *frame, PyObject *result) { PyObject *type, *value, *traceback; __Pyx_ErrFetchInState(tstate, &type, &value, &traceback); tstate->tracing++; tstate->use_tracing = 0; if (CYTHON_TRACE && tstate->c_tracefunc) tstate->c_tracefunc(tstate->c_traceobj, frame, PyTrace_RETURN, result); if (tstate->c_profilefunc) tstate->c_profilefunc(tstate->c_profileobj, frame, PyTrace_RETURN, result); CYTHON_FRAME_DEL(frame); tstate->use_tracing = 1; tstate->tracing--; __Pyx_ErrRestoreInState(tstate, type, value, traceback); } #ifdef WITH_THREAD #define __Pyx_TraceReturn(result, nogil)\ if (likely(!__Pyx_use_tracing)); else {\ if (nogil) {\ if (CYTHON_TRACE_NOGIL) {\ PyThreadState *tstate;\ PyGILState_STATE state = PyGILState_Ensure();\ tstate = __Pyx_PyThreadState_Current;\ if (tstate->use_tracing) {\ __Pyx_call_return_trace_func(tstate, __pyx_frame, (PyObject*)result);\ }\ PyGILState_Release(state);\ }\ } else {\ PyThreadState* tstate = __Pyx_PyThreadState_Current;\ if (tstate->use_tracing) {\ __Pyx_call_return_trace_func(tstate, __pyx_frame, (PyObject*)result);\ }\ }\ } #else #define __Pyx_TraceReturn(result, nogil)\ if (likely(!__Pyx_use_tracing)); else {\ PyThreadState* tstate = __Pyx_PyThreadState_Current;\ if (tstate->use_tracing) {\ __Pyx_call_return_trace_func(tstate, __pyx_frame, (PyObject*)result);\ }\ } #endif static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const char *srcfile, int firstlineno); static int __Pyx_TraceSetupAndCall(PyCodeObject** code, PyFrameObject** frame, PyThreadState* tstate, const char *funcname, const char *srcfile, int firstlineno); #else #define __Pyx_TraceDeclarations #define __Pyx_TraceFrameInit(codeobj) #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) if ((1)); else goto_error; #define __Pyx_TraceException() #define __Pyx_TraceReturn(result, nogil) #endif #if 
CYTHON_TRACE static int __Pyx_call_line_trace_func(PyThreadState *tstate, PyFrameObject *frame, int lineno) { int ret; PyObject *type, *value, *traceback; __Pyx_ErrFetchInState(tstate, &type, &value, &traceback); __Pyx_PyFrame_SetLineNumber(frame, lineno); tstate->tracing++; tstate->use_tracing = 0; ret = tstate->c_tracefunc(tstate->c_traceobj, frame, PyTrace_LINE, NULL); tstate->use_tracing = 1; tstate->tracing--; if (likely(!ret)) { __Pyx_ErrRestoreInState(tstate, type, value, traceback); } else { Py_XDECREF(type); Py_XDECREF(value); Py_XDECREF(traceback); } return ret; } #ifdef WITH_THREAD #define __Pyx_TraceLine(lineno, nogil, goto_error)\ if (likely(!__Pyx_use_tracing)); else {\ if (nogil) {\ if (CYTHON_TRACE_NOGIL) {\ int ret = 0;\ PyThreadState *tstate;\ PyGILState_STATE state = PyGILState_Ensure();\ tstate = __Pyx_PyThreadState_Current;\ if (unlikely(tstate->use_tracing && tstate->c_tracefunc && __pyx_frame->f_trace)) {\ ret = __Pyx_call_line_trace_func(tstate, __pyx_frame, lineno);\ }\ PyGILState_Release(state);\ if (unlikely(ret)) goto_error;\ }\ } else {\ PyThreadState* tstate = __Pyx_PyThreadState_Current;\ if (unlikely(tstate->use_tracing && tstate->c_tracefunc && __pyx_frame->f_trace)) {\ int ret = __Pyx_call_line_trace_func(tstate, __pyx_frame, lineno);\ if (unlikely(ret)) goto_error;\ }\ }\ } #else #define __Pyx_TraceLine(lineno, nogil, goto_error)\ if (likely(!__Pyx_use_tracing)); else {\ PyThreadState* tstate = __Pyx_PyThreadState_Current;\ if (unlikely(tstate->use_tracing && tstate->c_tracefunc && __pyx_frame->f_trace)) {\ int ret = __Pyx_call_line_trace_func(tstate, __pyx_frame, lineno);\ if (unlikely(ret)) goto_error;\ }\ } #endif #else #define __Pyx_TraceLine(lineno, nogil, goto_error) if ((1)); else goto_error; #endif /* PyObjectSetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS #define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value); #else #define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) #define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) #endif /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* SliceObject.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** py_start, PyObject** py_stop, PyObject** py_slice, int has_cstart, int has_cstop, int wraparound); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static 
CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname); /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* FetchCommonType.proto */ static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); /* CythonFunctionShared.proto */ #define __Pyx_CyFunction_USED 1 #define __Pyx_CYFUNCTION_STATICMETHOD 0x01 #define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 #define __Pyx_CYFUNCTION_CCLASS 0x04 #define __Pyx_CyFunction_GetClosure(f)\ (((__pyx_CyFunctionObject *) (f))->func_closure) #define __Pyx_CyFunction_GetClassObj(f)\ (((__pyx_CyFunctionObject *) (f))->func_classobj) #define __Pyx_CyFunction_Defaults(type, f)\ ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) #define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) typedef struct { PyCFunctionObject func; #if PY_VERSION_HEX < 0x030500A0 PyObject *func_weakreflist; #endif PyObject *func_dict; PyObject *func_name; PyObject *func_qualname; PyObject *func_doc; PyObject *func_globals; PyObject *func_code; PyObject *func_closure; PyObject *func_classobj; void *defaults; int defaults_pyobjects; size_t defaults_size; // used by FusedFunction for copying defaults int flags; PyObject *defaults_tuple; PyObject *defaults_kwdict; PyObject *(*defaults_getter)(PyObject *); PyObject *func_annotations; } __pyx_CyFunctionObject; static PyTypeObject *__pyx_CyFunctionType = 0; #define __Pyx_CyFunction_Check(obj) (__Pyx_TypeCheck(obj, __pyx_CyFunctionType)) static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *self, PyObject *module, PyObject *globals, PyObject* code); static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, size_t size, int pyobjects); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, PyObject *tuple); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, PyObject *dict); static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, PyObject *dict); static int __pyx_CyFunction_init(void); /* CythonFunction.proto */ static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject *globals, PyObject* code); /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return 
PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* PyIntCompare.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_NeObjC(PyObject *op1, PyObject *op2, long intval, long inplace); /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* IterFinish.proto */ static CYTHON_INLINE int __Pyx_IterFinish(void); /* UnpackItemEndCheck.proto */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_SubtractObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? 
PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) #endif /* PyIntCompare.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, long inplace); /* PyFloatBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyFloat_SubtractCObj(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); #else #define __Pyx_PyFloat_SubtractCObj(op1, op2, floatval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) #endif /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* PyFloatBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyFloat_DivideCObj(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); #else #define __Pyx_PyFloat_DivideCObj(op1, op2, floatval, inplace, zerodivision_check)\ ((inplace ? __Pyx_PyNumber_InPlaceDivide(op1, op2) : __Pyx_PyNumber_Divide(op1, op2))) #endif /* PyFloatBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyFloat_AddCObj(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); #else #define __Pyx_PyFloat_AddCObj(op1, op2, floatval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_SubtractCObj(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_SubtractCObj(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* SliceObject.proto */ #define __Pyx_PyObject_DelSlice(obj, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound)\ __Pyx_PyObject_SetSlice(obj, (PyObject*)NULL, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound) static CYTHON_INLINE int __Pyx_PyObject_SetSlice( PyObject* obj, PyObject* value, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** py_start, PyObject** py_stop, PyObject** py_slice, int has_cstart, int has_cstop, int wraparound); /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddCObj(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddCObj(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* PyFloatBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyFloat_AddObjC(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); #else #define __Pyx_PyFloat_AddObjC(op1, op2, floatval, inplace, zerodivision_check)\ (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* PyFloatBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyFloat_NeObjC(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); #else #define __Pyx_PyFloat_NeObjC(op1, op2, floatval, inplace, zerodivision_check)\ (PyObject_RichCompare(op1, op2, Py_NE)) #endif /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* BufferIndexError.proto */ static void __Pyx_RaiseBufferIndexError(int axis); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* BufferGetAndValidate.proto */ #define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ ((obj == Py_None || obj == NULL) ?\ (__Pyx_ZeroBuffer(buf), 0) :\ __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static void __Pyx_ZeroBuffer(Py_buffer* buf); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; /* IncludeStringH.proto */ #include /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int 
clineno, int lineno, const char *filename, int full_traceback, int nogil); /* PyFloatBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyFloat_SubtractObjC(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); #else #define __Pyx_PyFloat_SubtractObjC(op1, op2, floatval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) #endif /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key); #define __Pyx_PyObject_Dict_GetItem(obj, name)\ (likely(PyDict_CheckExact(obj)) ?\ __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name)) #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name) #endif /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, 
Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* PyObjectGetAttrStrNoError.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* TypeImport.proto */ #ifndef __PYX_HAVE_RT_ImportType_proto #define __PYX_HAVE_RT_ImportType_proto enum __Pyx_ImportType_CheckSize { __Pyx_ImportType_CheckSize_Error = 0, __Pyx_ImportType_CheckSize_Warn = 1, __Pyx_ImportType_CheckSize_Ignore = 2 }; static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); #endif /* GetVTable.proto */ static void* __Pyx_GetVtable(PyObject *dict); /* CalculateMetaclass.proto */ static PyObject 
*__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases); /* SetNameInClass.proto */ #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 #define __Pyx_SetNameInClass(ns, name, value)\ (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value)) #elif CYTHON_COMPILING_IN_CPYTHON #define __Pyx_SetNameInClass(ns, name, value)\ (likely(PyDict_CheckExact(ns)) ? PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value)) #else #define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value) #endif /* Py3ClassCreate.proto */ static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc); static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice 
__Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float__const__(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_nn_uint64_t(PyObject *, int writable_flag); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint64_t(uint64_t value); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_nn_uint64_t__const__(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int__const__(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_int__const__(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_float(PyObject *, int writable_flag); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_5thinc_8typedefs_weight_t(PyObject *, int writable_flag); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_float(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_float(const char *itemp, PyObject *obj); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_float__const__(const char *itemp); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_nn_uint64_t(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_nn_uint64_t(const char *itemp, PyObject *obj); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_nn_uint64_t__const__(const char *itemp); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_int(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_int(const char *itemp, PyObject *obj); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_int__const__(const char *itemp); /* RealImag.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX\ && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_float(a, b) ((a)==(b)) #define __Pyx_c_sum_float(a, b) ((a)+(b)) #define __Pyx_c_diff_float(a, b) ((a)-(b)) #define __Pyx_c_prod_float(a, b) ((a)*(b)) #define __Pyx_c_quot_float(a, b) ((a)/(b)) #define __Pyx_c_neg_float(a) (-(a)) #ifdef __cplusplus 
#define __Pyx_c_is_zero_float(z) ((z)==(float)0) #define __Pyx_c_conj_float(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_float(z) (::std::abs(z)) #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_float(z) ((z)==0) #define __Pyx_c_conj_float(z) (conjf(z)) #if 1 #define __Pyx_c_abs_float(z) (cabsf(z)) #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_double(a, b) ((a)==(b)) #define __Pyx_c_sum_double(a, b) ((a)+(b)) #define __Pyx_c_diff_double(a, b) ((a)-(b)) #define __Pyx_c_prod_double(a, b) ((a)*(b)) #define __Pyx_c_quot_double(a, b) ((a)/(b)) #define __Pyx_c_neg_double(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_double(z) ((z)==(double)0) #define __Pyx_c_conj_double(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_double(z) (::std::abs(z)) #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_double(z) ((z)==0) #define __Pyx_c_conj_double(z) (conj(z)) #if 1 #define __Pyx_c_abs_double(z) (cabs(z)) #define __Pyx_c_pow_double(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int32_t(int32_t value); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static 
CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE uint64_t __Pyx_PyInt_As_uint64_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE uint32_t __Pyx_PyInt_As_uint32_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE int32_t __Pyx_PyInt_As_int32_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* FunctionExport.proto */ static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig); /* FunctionImport.proto */ static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static CYTHON_INLINE int __pyx_f_5thinc_6linalg_3Vec_arg_max(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_scores, int const __pyx_v_n_classes); /* proto*/ static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_max(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_x, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_sum(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_vec, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_norm(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_vec, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_add(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_inc, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_add_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_inc, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_mul(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scal, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_mul_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scal, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_pow(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scal, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_pow_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t const __pyx_v_scal, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_div(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scal, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_div_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t const __pyx_v_scal, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_exp(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_vec, int32_t 
__pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_exp_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_vec, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_reciprocal_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_vec, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_mean(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_X, int32_t __pyx_v_nr_dim); /* proto*/ static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_variance(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_X, int32_t __pyx_v_nr_dim); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_add(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scale, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_add_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scale, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_batch_add_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scale, int32_t __pyx_v_nr, int32_t __pyx_v_nr_batch); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_add_pow(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_power, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_add_pow_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_power, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_mul(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_mul_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_6VecVec_dot(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, int32_t __pyx_v_nr); /* proto*/ static CYTHON_INLINE int __pyx_f_5thinc_6linalg_6VecVec_arg_max_if_true(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_scores, int const *__pyx_v_is_valid, int const __pyx_v_n_classes); /* proto*/ static CYTHON_INLINE int __pyx_f_5thinc_6linalg_6VecVec_arg_max_if_zero(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_scores, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_costs, int const __pyx_v_n_classes); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Mat_mean_row(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_Ex, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_mat, int32_t __pyx_v_nr_row, int32_t __pyx_v_nr_col); /* proto*/ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Mat_var_row(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_Vx, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_mat, __pyx_t_5thinc_8typedefs_weight_t const 
*__pyx_v_Ex, int32_t __pyx_v_nr_row, int32_t __pyx_v_nr_col, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_eps); /* proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'libc.stdint' */ /* Module declarations from 'libc.math' */ /* Module declarations from 'cymem.cymem' */ static PyTypeObject *__pyx_ptype_5cymem_5cymem_PyMalloc = 0; static PyTypeObject *__pyx_ptype_5cymem_5cymem_PyFree = 0; static PyTypeObject *__pyx_ptype_5cymem_5cymem_Pool = 0; static PyTypeObject *__pyx_ptype_5cymem_5cymem_Address = 0; /* Module declarations from 'preshed.maps' */ static PyTypeObject *__pyx_ptype_7preshed_4maps_PreshMap = 0; static PyTypeObject *__pyx_ptype_7preshed_4maps_PreshMapArray = 0; /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'thinc.typedefs' */ /* Module declarations from 'thinc.linalg' */ static PyTypeObject *__pyx_ptype_5thinc_6linalg_Matrix = 0; static PyTypeObject *__pyx_ptype_5thinc_6linalg_Vec = 0; static PyTypeObject *__pyx_ptype_5thinc_6linalg_VecVec = 0; static PyTypeObject *__pyx_ptype_5thinc_6linalg_Mat = 0; /* Module declarations from 'murmurhash.mrmr' */ static uint64_t 
(*__pyx_f_10murmurhash_4mrmr_hash64)(void *, int, uint64_t); /*proto*/ static void (*__pyx_f_10murmurhash_4mrmr_hash128_x86)(void const *, int, uint32_t, void *); /*proto*/ static void (*__pyx_f_10murmurhash_4mrmr_hash128_x64)(void const *, int, uint32_t, void *); /*proto*/ /* Module declarations from 'blis.cy' */ /* Module declarations from 'blis' */ /* Module declarations from 'thinc.neural.ops' */ static PyTypeObject *__pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences = 0; static PyTypeObject *__pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop = 0; static PyTypeObject *__pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct_2_dropout = 0; static PyTypeObject *__pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop = 0; static PyTypeObject *__pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences = 0; static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static void __pyx_f_5thinc_6neural_3ops_seq2col(float *, float const *, int, int, int); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_backprop_seq2col(float *, float const *, int, int, int); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_cpu_maxout(float *, int *, float const *, int, int, int); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_cpu_backprop_maxout(float *, float const *, int const *, int, int, int); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_cpu_mean_pool(float *, float const *, int const *, int, int, int); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_cpu_backprop_mean_pool(float *, float const *, int const *, int, int, int); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_cpu_max_pool(float *, int *, float const *, int const *, int, int, int); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_cpu_backprop_max_pool(float *, float const *, int const *, int const *, int, int, int); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_cpu_position_encode(float *, float, int, int); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_cpu_scatter_add(float *, int const *, float const *, int, int); /*proto*/ static void __pyx_f_5thinc_6neural_3ops__adam_momentum(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t *, int, __pyx_t_5thinc_8typedefs_weight_t, __pyx_t_5thinc_8typedefs_weight_t, __pyx_t_5thinc_8typedefs_weight_t, __pyx_t_5thinc_8typedefs_weight_t); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_cpu_mish(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, float, int); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_cpu_backprop_mish(__pyx_t_5thinc_8typedefs_weight_t *, __pyx_t_5thinc_8typedefs_weight_t const *, __pyx_t_5thinc_8typedefs_weight_t const *, float, int); /*proto*/ static PyObject *__pyx_f_5thinc_6neural_3ops_cpu_floats_ptr2array(float *, PyObject *); /*proto*/ static PyObject *__pyx_f_5thinc_6neural_3ops_cpu_ints_ptr2array(int *, PyObject *); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_cpu_sum_pool(float *, float const *, int const *, int, int, int); /*proto*/ static void __pyx_f_5thinc_6neural_3ops_cpu_backprop_sum_pool(float *, float const 
*, int const *, int, int, int); /*proto*/ static CYTHON_INLINE float __pyx_f_5thinc_6neural_3ops_sigmoid(float); /*proto*/ static CYTHON_INLINE float __pyx_f_5thinc_6neural_3ops_dsigmoid(float); /*proto*/ static CYTHON_INLINE float __pyx_f_5thinc_6neural_3ops_dtanh(float); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static 
__Pyx_TypeInfo __Pyx_TypeInfo_nn_uint64_t = { "uint64_t", NULL, sizeof(uint64_t), { 0 }, 0, IS_UNSIGNED(uint64_t) ? 'U' : 'I', IS_UNSIGNED(uint64_t), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn_uint32_t = { "uint32_t", NULL, sizeof(uint32_t), { 0 }, 0, IS_UNSIGNED(uint32_t) ? 'U' : 'I', IS_UNSIGNED(uint32_t), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_float__const__ = { "const float", NULL, sizeof(float const ), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 'U' : 'I', IS_UNSIGNED(int), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn_uint64_t__const__ = { "const uint64_t", NULL, sizeof(uint64_t const ), { 0 }, 0, IS_UNSIGNED(uint64_t const ) ? 'U' : 'I', IS_UNSIGNED(uint64_t const ), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_int__const__ = { "const int", NULL, sizeof(int const ), { 0 }, 0, IS_UNSIGNED(int const ) ? 'U' : 'I', IS_UNSIGNED(int const ), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5thinc_8typedefs_weight_t = { "weight_t", NULL, sizeof(__pyx_t_5thinc_8typedefs_weight_t), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "thinc.neural.ops" extern int __pyx_module_is_main_thinc__neural__ops; int __pyx_module_is_main_thinc__neural__ops = 0; /* Implementation of 'thinc.neural.ops' */ static PyObject *__pyx_builtin_ImportError; static PyObject *__pyx_builtin_object; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_max; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_zip; static PyObject *__pyx_builtin_NotImplementedError; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_B[] = "B"; static const char __pyx_k_C[] = "C"; static const char __pyx_k_D[] = "D"; static const char __pyx_k_I[] = "I"; static const char __pyx_k_N[] = "N"; static const char __pyx_k_O[] = "O"; static const char __pyx_k_P[] = "P"; static const char __pyx_k_T[] = "T"; static const char __pyx_k_W[] = "W"; static const char __pyx_k_X[] = "X"; static const char __pyx_k_Y[] = "Y"; static const char __pyx_k_a[] = "a"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_f[] = "f"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k_m[] = "m"; static const char __pyx_k_n[] = "n"; static const char __pyx_k_t[] = "t"; static const char __pyx_k_x[] = "x"; static const char __pyx_k_y[] = "y"; static const char __pyx_k_Xs[] = "Xs"; static const char __pyx_k_at[] = "at"; static const char __pyx_k_ct[] = "ct"; static const char __pyx_k_dX[] = "dX"; static const char __pyx_k_dY[] = "dY"; static const char __pyx_k_dc[] = "dc"; static const char __pyx_k_dx[] = "dx"; static const char __pyx_k_dy[] = "dy"; static const char __pyx_k_hc[] = "hc"; static const char __pyx_k_hf[] = "hf"; static const char __pyx_k_hi[] = "hi"; static const char __pyx_k_ho[] = "ho"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_nB[] = "nB"; static const char __pyx_k_nF[] = "nF"; static const char __pyx_k_nP[] = "nP"; static const char __pyx_k_nS[] = "nS"; static const char __pyx_k_nW[] = "nW"; static const char __pyx_k_py[] = "py"; static const char __pyx_k_xp[] = "xp"; static const char 
__pyx_k_Ops[] = "Ops"; static const char __pyx_k_add[] = "add"; static const char __pyx_k_arr[] = "arr"; static const char __pyx_k_cpu[] = "cpu"; static const char __pyx_k_dhc[] = "dhc"; static const char __pyx_k_dhf[] = "dhf"; static const char __pyx_k_dhi[] = "dhi"; static const char __pyx_k_dho[] = "dho"; static const char __pyx_k_doc[] = "__doc__"; static const char __pyx_k_dot[] = "dot"; static const char __pyx_k_elu[] = "elu"; static const char __pyx_k_ema[] = "ema"; static const char __pyx_k_eps[] = "eps"; static const char __pyx_k_exp[] = "exp"; static const char __pyx_k_get[] = "get"; static const char __pyx_k_gpu[] = "gpu"; static const char __pyx_k_ids[] = "ids"; static const char __pyx_k_key[] = "key"; static const char __pyx_k_loc[] = "loc"; static const char __pyx_k_log[] = "log"; static const char __pyx_k_max[] = "max"; static const char __pyx_k_mem[] = "mem"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_out[] = "out"; static const char __pyx_k_pad[] = "pad"; static const char __pyx_k_seq[] = "seq"; static const char __pyx_k_src[] = "src"; static const char __pyx_k_sum[] = "sum"; static const char __pyx_k_zip[] = "zip"; static const char __pyx_k_Xsub[] = "Xsub"; static const char __pyx_k__144[] = ""; static const char __pyx_k_adam[] = "adam"; static const char __pyx_k_args[] = "args"; static const char __pyx_k_axes[] = "axes"; static const char __pyx_k_axis[] = "axis"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_best[] = "best"; static const char __pyx_k_bias[] = "bias"; static const char __pyx_k_blis[] = "blis"; static const char __pyx_k_clip[] = "clip"; static const char __pyx_k_cols[] = "cols"; static const char __pyx_k_copy[] = "copy"; static const char __pyx_k_cuda[] = "cuda"; static const char __pyx_k_cupy[] = "cupy"; static const char __pyx_k_data[] = "data"; static const char __pyx_k_dest[] = "dest"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_drop[] = "drop"; static const char __pyx_k_fill[] = "fill"; static const char __pyx_k_gemm[] = "gemm"; static const char __pyx_k_hash[] = "hash"; static const char __pyx_k_init[] = "__init__"; static const char __pyx_k_keys[] = "keys_"; static const char __pyx_k_loss[] = "loss"; static const char __pyx_k_lstm[] = "lstm"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mask[] = "mask"; static const char __pyx_k_mish[] = "mish"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_mom1[] = "mom1"; static const char __pyx_k_mom2[] = "mom2"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_norm[] = "norm"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_prev[] = "prev"; static const char __pyx_k_prod[] = "prod"; static const char __pyx_k_relu[] = "relu"; static const char __pyx_k_seed[] = "seed"; static const char __pyx_k_self[] = "self"; static const char __pyx_k_selu[] = "selu"; static const char __pyx_k_seqs[] = "seqs"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_sort[] = "sort"; static const char __pyx_k_sqrt[] = "sqrt"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_sums[] = "sums"; static const char __pyx_k_tanh[] = "tanh"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_util[] = "util"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_Sized[] = 
"Sized"; static const char __pyx_k_Xsoft[] = "Xsoft"; static const char __pyx_k_Y_ptr[] = "Y_ptr"; static const char __pyx_k_alpha[] = "alpha"; static const char __pyx_k_arr_i[] = "arr_i"; static const char __pyx_k_array[] = "array"; static const char __pyx_k_beta1[] = "beta1"; static const char __pyx_k_beta2[] = "beta2"; static const char __pyx_k_cells[] = "cells"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_dXsub[] = "dXsub"; static const char __pyx_k_dYsub[] = "dYsub"; static const char __pyx_k_decay[] = "decay"; static const char __pyx_k_delta[] = "delta_"; static const char __pyx_k_dtanh[] = "dtanh"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_inits[] = "inits"; static const char __pyx_k_int32[] = "int32"; static const char __pyx_k_log1p[] = "log1p"; static const char __pyx_k_masks[] = "masks"; static const char __pyx_k_maxes[] = "maxes"; static const char __pyx_k_means[] = "means"; static const char __pyx_k_new_x[] = "new_x"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_omega[] = "omega"; static const char __pyx_k_order[] = "order"; static const char __pyx_k_out_2[] = "out_"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_scale[] = "scale"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_sumdx[] = "sumdx"; static const char __pyx_k_unpad[] = "unpad"; static const char __pyx_k_unzip[] = "unzip"; static const char __pyx_k_value[] = "value"; static const char __pyx_k_where[] = "where"; static const char __pyx_k_which[] = "which"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_affine[] = "affine"; static const char __pyx_k_argmax[] = "argmax"; static const char __pyx_k_compat[] = "compat"; static const char __pyx_k_dX__bo[] = "dX__bo"; static const char __pyx_k_dX_ptr[] = "dX_ptr"; static const char __pyx_k_d_prev[] = "d_prev"; static const char __pyx_k_d_sums[] = "d_sums"; static const char __pyx_k_device[] = "device"; static const char __pyx_k_dotted[] = "dotted"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_fan_in[] = "fan_in"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_ids_mv[] = "ids_mv"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_inputs[] = "inputs"; static const char __pyx_k_keys_2[] = "keys"; static const char __pyx_k_kwargs[] = "kwargs"; static const char __pyx_k_length[] = "length"; static const char __pyx_k_linalg[] = "linalg"; static const char __pyx_k_log_yp[] = "log_yp"; static const char __pyx_k_mapped[] = "mapped"; static const char __pyx_k_masked[] = "masked"; static const char __pyx_k_matmul[] = "matmul"; static const char __pyx_k_maxout[] = "maxout"; static const char __pyx_k_memptr[] = "memptr"; static const char __pyx_k_module[] = "__module__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_ngrams[] = "ngrams"; static const char __pyx_k_normal[] = "normal"; static const char __pyx_k_object[] = "object"; static const char __pyx_k_output[] = "output"; static const char __pyx_k_padded[] = "padded"; static const char __pyx_k_period[] = "period"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_random[] = "random"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_result[] = "result"; static const char __pyx_k_seqs_i[] 
= "seqs_i"; static const char __pyx_k_signal[] = "signal"; static const char __pyx_k_starts[] = "_starts"; static const char __pyx_k_stride[] = "stride"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_summed[] = "summed"; static const char __pyx_k_to_add[] = "_to_add"; static const char __pyx_k_to_sum[] = "to_sum"; static const char __pyx_k_trans1[] = "trans1"; static const char __pyx_k_trans2[] = "trans2"; static const char __pyx_k_uint32[] = "uint32"; static const char __pyx_k_uint64[] = "uint64"; static const char __pyx_k_unflat[] = "unflat"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_workon[] = "workon"; static const char __pyx_k_y_pred[] = "y_pred"; static const char __pyx_k_y_true[] = "y_true"; static const char __pyx_k_CupyOps[] = "CupyOps"; static const char __pyx_k_Ops_dot[] = "Ops.dot"; static const char __pyx_k_add_sum[] = "add_sum"; static const char __pyx_k_asarray[] = "asarray"; static const char __pyx_k_blis_py[] = "blis.py"; static const char __pyx_k_dX__bop[] = "dX__bop"; static const char __pyx_k_d_cells[] = "d_cells"; static const char __pyx_k_d_maxes[] = "d_maxes"; static const char __pyx_k_d_means[] = "d_means"; static const char __pyx_k_delta_2[] = "delta"; static const char __pyx_k_dropout[] = "dropout"; static const char __pyx_k_entropy[] = "entropy"; static const char __pyx_k_flatten[] = "flatten"; static const char __pyx_k_float32[] = "float32"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_in_size[] = "in_size"; static const char __pyx_k_indices[] = "indices"; static const char __pyx_k_inplace[] = "inplace"; static const char __pyx_k_lengths[] = "lengths"; static const char __pyx_k_logloss[] = "logloss"; static const char __pyx_k_mapping[] = "mapping"; static const char __pyx_k_maximum[] = "maximum"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_n_items[] = "n_items"; static const char __pyx_k_n_slice[] = "n_slice"; static const char __pyx_k_ndarray[] = "ndarray"; static const char __pyx_k_ops_pyx[] = "ops.pyx"; static const char __pyx_k_pointer[] = "pointer"; static const char __pyx_k_prepare[] = "__prepare__"; static const char __pyx_k_py_best[] = "py_best"; static const char __pyx_k_reshape[] = "reshape"; static const char __pyx_k_reverse[] = "reverse"; static const char __pyx_k_seq2col[] = "seq2col"; static const char __pyx_k_shifted[] = "shifted"; static const char __pyx_k_sigmoid[] = "sigmoid"; static const char __pyx_k_softmax[] = "softmax"; static const char __pyx_k_uniform[] = "uniform"; static const char __pyx_k_weights[] = "weights"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_NumpyOps[] = "NumpyOps"; static const char __pyx_k_Ops_adam[] = "Ops.adam"; static const char __pyx_k_Ops_lstm[] = "Ops.lstm"; static const char __pyx_k_Ops_mish[] = "Ops.mish"; static const char __pyx_k_Ops_norm[] = "Ops.norm"; static const char __pyx_k_allocate[] = "allocate"; static const char __pyx_k_backprop[] = "backprop"; static const char __pyx_k_clip_low[] = "clip_low"; static const char __pyx_k_d_output[] = "d_output"; static const char __pyx_k_data_ptr[] = "data_ptr"; static const char __pyx_k_dsigmoid[] = "dsigmoid"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_gradient[] = "gradient"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_keepdims[] = "keepdims"; static const char __pyx_k_max_pool[] = "max_pool"; static 
const char __pyx_k_mod_rate[] = "mod_rate"; static const char __pyx_k_output_2[] = "output_"; static const char __pyx_k_py_cands[] = "py_cands"; static const char __pyx_k_py_which[] = "py_which"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_qualname[] = "__qualname__"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_softplus[] = "softplus"; static const char __pyx_k_starts_2[] = "starts"; static const char __pyx_k_sum_pool[] = "sum_pool"; static const char __pyx_k_timestep[] = "timestep"; static const char __pyx_k_to_add_2[] = "to_add"; static const char __pyx_k_unpadded[] = "unpadded"; static const char __pyx_k_variance[] = "variance"; static const char __pyx_k_Ops_dtanh[] = "Ops.dtanh"; static const char __pyx_k_Ops_unzip[] = "Ops.unzip"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_batch_dot[] = "batch_dot"; static const char __pyx_k_coinflips[] = "coinflips"; static const char __pyx_k_cupy_cuda[] = "cupy.cuda"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_grad_norm[] = "grad_norm"; static const char __pyx_k_log1p_exp[] = "log1p_exp"; static const char __pyx_k_max_decay[] = "max_decay"; static const char __pyx_k_mean_pool[] = "mean_pool"; static const char __pyx_k_metaclass[] = "__metaclass__"; static const char __pyx_k_nr_weight[] = "nr_weight"; static const char __pyx_k_out_array[] = "out_array"; static const char __pyx_k_positions[] = "positions"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_remap_ids[] = "remap_ids"; static const char __pyx_k_signal_in[] = "signal_in_"; static const char __pyx_k_tensordot[] = "tensordot"; static const char __pyx_k_threshold[] = "threshold"; static const char __pyx_k_transpose[] = "transpose"; static const char __pyx_k_unflatten[] = "unflatten"; static const char __pyx_k_which__bo[] = "which__bo"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_Ops___init[] = "Ops.__init__"; static const char __pyx_k_Ops_affine[] = "Ops.affine"; static const char __pyx_k_Ops_argmax[] = "Ops.argmax"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_act_pieces[] = "act_pieces"; static const char __pyx_k_copy_array[] = "copy_array"; static const char __pyx_k_extra_dims[] = "extra_dims"; static const char __pyx_k_learn_rate[] = "learn_rate"; static const char __pyx_k_output_arr[] = "output_arr"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_signal_out[] = "signal_out_"; static const char __pyx_k_take_which[] = "take_which"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_Ops_add_sum[] = "Ops.add_sum"; static const char __pyx_k_Ops_asarray[] = "Ops.asarray"; static const char __pyx_k_Ops_dropout[] = "Ops.dropout"; static const char __pyx_k_Ops_flatten[] = "Ops.flatten"; static const char __pyx_k_Ops_logloss[] = "Ops.logloss"; static const char __pyx_k_Ops_seq2col[] = "Ops.seq2col"; static const char __pyx_k_Ops_sigmoid[] = "Ops.sigmoid"; static const char __pyx_k_Ops_softmax[] = "Ops.softmax"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_collections[] = "collections"; static const char __pyx_k_concatenate[] = "concatenate"; static const char __pyx_k_expand_dims[] 
= "expand_dims"; static const char __pyx_k_gate_pieces[] = "gate_pieces"; static const char __pyx_k_inplace_add[] = "inplace_add"; static const char __pyx_k_noise_level[] = "noise_level"; static const char __pyx_k_normal_init[] = "normal_init"; static const char __pyx_k_scatter_add[] = "scatter_add"; static const char __pyx_k_signal_in_2[] = "signal_in"; static const char __pyx_k_whole_array[] = "whole_array"; static const char __pyx_k_CupyOps_adam[] = "CupyOps.adam"; static const char __pyx_k_CupyOps_gemm[] = "CupyOps.gemm"; static const char __pyx_k_CupyOps_hash[] = "CupyOps.hash"; static const char __pyx_k_CupyOps_mish[] = "CupyOps.mish"; static const char __pyx_k_CupyOps_relu[] = "CupyOps.relu"; static const char __pyx_k_CupyOps_selu[] = "CupyOps.selu"; static const char __pyx_k_NumpyOps_elu[] = "NumpyOps.elu"; static const char __pyx_k_Ops_allocate[] = "Ops.allocate"; static const char __pyx_k_Ops_clip_low[] = "Ops.clip_low"; static const char __pyx_k_Ops_dsigmoid[] = "Ops.dsigmoid"; static const char __pyx_k_Ops_softplus[] = "Ops.softplus"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_backprop_elu[] = "backprop_elu"; static const char __pyx_k_c_contiguous[] = "c_contiguous"; static const char __pyx_k_contig_array[] = "contig_array"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_signal_out_2[] = "signal_out"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_MemoryPointer[] = "MemoryPointer"; static const char __pyx_k_NumpyOps_adam[] = "NumpyOps.adam"; static const char __pyx_k_NumpyOps_gemm[] = "NumpyOps.gemm"; static const char __pyx_k_NumpyOps_hash[] = "NumpyOps.hash"; static const char __pyx_k_NumpyOps_mish[] = "NumpyOps.mish"; static const char __pyx_k_NumpyOps_relu[] = "NumpyOps.relu"; static const char __pyx_k_NumpyOps_selu[] = "NumpyOps.selu"; static const char __pyx_k_Ops_batch_dot[] = "Ops.batch_dot"; static const char __pyx_k_Ops_unflatten[] = "Ops.unflatten"; static const char __pyx_k_aligned_alloc[] = "_aligned_alloc"; static const char __pyx_k_backprop_lstm[] = "backprop_lstm"; static const char __pyx_k_backprop_mish[] = "backprop_mish"; static const char __pyx_k_backprop_relu[] = "backprop_relu"; static const char __pyx_k_backprop_selu[] = "backprop_selu"; static const char __pyx_k_backprop_take[] = "backprop_take"; static const char __pyx_k_clip_gradient[] = "clip_gradient"; static const char __pyx_k_contig_starts[] = "contig_starts"; static const char __pyx_k_contig_to_add[] = "contig_to_add"; static const char __pyx_k_d_gate_pieces[] = "d_gate_pieces"; static const char __pyx_k_finish_update[] = "finish_update"; static const char __pyx_k_integer_types[] = "integer_types"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_wrap_backprop[] = "wrap_backprop"; static const char __pyx_k_zeros_aligned[] = "zeros_aligned"; static const char __pyx_k_CupyOps_matmul[] = "CupyOps.matmul"; static const char __pyx_k_CupyOps_maxout[] = "CupyOps.maxout"; static const char __pyx_k_Ops_take_which[] = "Ops.take_which"; static const char __pyx_k_custom_kernels[] = "_custom_kernels"; static const char __pyx_k_he_normal_init[] = "he_normal_init"; static const char __pyx_k_CupyOps_asarray[] = "CupyOps.asarray"; static const char __pyx_k_CupyOps_seq2col[] = "CupyOps.seq2col"; static const char __pyx_k_NumpyOps_affine[] = "NumpyOps.affine"; static const char 
__pyx_k_NumpyOps_matmul[] = "NumpyOps.matmul"; static const char __pyx_k_NumpyOps_maxout[] = "NumpyOps.maxout"; static const char __pyx_k_NumpyOps_ngrams[] = "NumpyOps.ngrams"; static const char __pyx_k_Ops_expand_dims[] = "Ops.expand_dims"; static const char __pyx_k_Ops_normal_init[] = "Ops.normal_init"; static const char __pyx_k_T_param_T_m_T_v[] = "T param, T m, T v"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_add_batch_outer[] = "add_batch_outer"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_backprop_maxout[] = "backprop_maxout"; static const char __pyx_k_batch_size_at_t[] = "batch_size_at_t"; static const char __pyx_k_collections_abc[] = "collections.abc"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_lengths_indices[] = "lengths_indices"; static const char __pyx_k_position_encode[] = "position_encode"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_update_averages[] = "update_averages"; static const char __pyx_k_CupyOps_max_pool[] = "CupyOps.max_pool"; static const char __pyx_k_CupyOps_sum_pool[] = "CupyOps.sum_pool"; static const char __pyx_k_NumpyOps_add_sum[] = "NumpyOps.add_sum"; static const char __pyx_k_NumpyOps_asarray[] = "NumpyOps.asarray"; static const char __pyx_k_NumpyOps_seq2col[] = "NumpyOps.seq2col"; static const char __pyx_k_backprop_seq2col[] = "backprop_seq2col"; static const char __pyx_k_backprop_softmax[] = "backprop_softmax"; static const char __pyx_k_get_array_module[] = "get_array_module"; static const char __pyx_k_get_dropout_mask[] = "get_dropout_mask"; static const char __pyx_k_increment_slices[] = "increment_slices"; static const char __pyx_k_square_sequences[] = "square_sequences"; static const char __pyx_k_thinc_neural_ops[] = "thinc.neural.ops"; static const char __pyx_k_CupyOps_mean_pool[] = "CupyOps.mean_pool"; static const char __pyx_k_ElementwiseKernel[] = "ElementwiseKernel"; static const char __pyx_k_NumpyOps_allocate[] = "NumpyOps.allocate"; static const char __pyx_k_NumpyOps_max_pool[] = "NumpyOps.max_pool"; static const char __pyx_k_NumpyOps_sum_pool[] = "NumpyOps.sum_pool"; static const char __pyx_k_Ops_backprop_lstm[] = "Ops.backprop_lstm"; static const char __pyx_k_Ops_backprop_mish[] = "Ops.backprop_mish"; static const char __pyx_k_Ops_backprop_take[] = "Ops.backprop_take"; static const char __pyx_k_Ops_clip_gradient[] = "Ops.clip_gradient"; static const char __pyx_k_ascontiguousarray[] = "ascontiguousarray"; static const char __pyx_k_backprop_max_pool[] = "backprop_max_pool"; static const char __pyx_k_backprop_softplus[] = "backprop_softplus"; static const char __pyx_k_backprop_sum_pool[] = "backprop_sum_pool"; static const char __pyx_k_cpu_clip_gradient[] = "cpu_clip_gradient"; static const char __pyx_k_dropout_sequences[] = "dropout_sequences"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_softmax_sequences[] = "softmax_sequences"; static const char __pyx_k_NumpyOps_mean_pool[] = "NumpyOps.mean_pool"; static const char __pyx_k_NumpyOps_remap_ids[] = "NumpyOps.remap_ids"; static const char __pyx_k_Ops_he_normal_init[] = "Ops.he_normal_init"; static const char __pyx_k_add_gradient_noise[] = "add_gradient_noise"; static const char __pyx_k_backprop_mean_pool[] = "backprop_mean_pool"; static const char __pyx_k_cline_in_traceback[] = 
"cline_in_traceback"; static const char __pyx_k_compile_with_cache[] = "compile_with_cache"; static const char __pyx_k_cupy_cuda_compiler[] = "cupy.cuda.compiler"; static const char __pyx_k_strided_and_direct[] = ""; static const char __pyx_k_CupyOps_normal_init[] = "CupyOps.normal_init"; static const char __pyx_k_CupyOps_scatter_add[] = "CupyOps.scatter_add"; static const char __pyx_k_NotImplementedError[] = "NotImplementedError"; static const char __pyx_k_Ops_add_batch_outer[] = "Ops.add_batch_outer"; static const char __pyx_k_Ops_update_averages[] = "Ops.update_averages"; static const char __pyx_k_xavier_uniform_init[] = "xavier_uniform_init"; static const char __pyx_k_NumpyOps_inplace_add[] = "NumpyOps.inplace_add"; static const char __pyx_k_NumpyOps_scatter_add[] = "NumpyOps.scatter_add"; static const char __pyx_k_Ops_backprop_seq2col[] = "Ops.backprop_seq2col"; static const char __pyx_k_Ops_backprop_softmax[] = "Ops.backprop_softmax"; static const char __pyx_k_Ops_get_dropout_mask[] = "Ops.get_dropout_mask"; static const char __pyx_k_Ops_square_sequences[] = "Ops.square_sequences"; static const char __pyx_k_strided_and_indirect[] = ""; static const char __pyx_k_CupyOps_backprop_mish[] = "CupyOps.backprop_mish"; static const char __pyx_k_CupyOps_backprop_relu[] = "CupyOps.backprop_relu"; static const char __pyx_k_CupyOps_backprop_selu[] = "CupyOps.backprop_selu"; static const char __pyx_k_CupyOps_clip_gradient[] = "CupyOps.clip_gradient"; static const char __pyx_k_NumpyOps_backprop_elu[] = "NumpyOps.backprop_elu"; static const char __pyx_k_Ops_backprop_softplus[] = "Ops.backprop_softplus"; static const char __pyx_k_Ops_dropout_sequences[] = "Ops.dropout_sequences"; static const char __pyx_k_Ops_softmax_sequences[] = "Ops.softmax_sequences"; static const char __pyx_k_contiguous_and_direct[] = ""; static const char __pyx_k_MemoryView_of_r_object[] = ""; static const char __pyx_k_NumpyOps_backprop_mish[] = "NumpyOps.backprop_mish"; static const char __pyx_k_NumpyOps_backprop_relu[] = "NumpyOps.backprop_relu"; static const char __pyx_k_NumpyOps_backprop_selu[] = "NumpyOps.backprop_selu"; static const char __pyx_k_CupyOps_backprop_maxout[] = "CupyOps.backprop_maxout"; static const char __pyx_k_CupyOps_position_encode[] = "CupyOps.position_encode"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = ""; static const char __pyx_k_Ops_xavier_uniform_init[] = "Ops.xavier_uniform_init"; static const char __pyx_k_contiguous_and_indirect[] = ""; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_CupyOps_backprop_seq2col[] = "CupyOps.backprop_seq2col"; static const char __pyx_k_NumpyOps_backprop_maxout[] = "NumpyOps.backprop_maxout"; static const char __pyx_k_NumpyOps_position_encode[] = "NumpyOps.position_encode"; static const char __pyx_k_CupyOps_backprop_max_pool[] = "CupyOps.backprop_max_pool"; static const char __pyx_k_CupyOps_backprop_sum_pool[] = "CupyOps.backprop_sum_pool"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_NumpyOps_backprop_seq2col[] = "NumpyOps.backprop_seq2col"; static const char __pyx_k_NumpyOps_increment_slices[] = "NumpyOps.increment_slices"; static const char __pyx_k_Ops_dropout_locals_lambda[] = "Ops.dropout.."; static const char __pyx_k_CupyOps_backprop_mean_pool[] = "CupyOps.backprop_mean_pool"; static const char __pyx_k_NumpyOps_backprop_max_pool[] = "NumpyOps.backprop_max_pool"; static const char __pyx_k_NumpyOps_backprop_sum_pool[] = 
"NumpyOps.backprop_sum_pool"; static const char __pyx_k_backprop_softmax_sequences[] = "backprop_softmax_sequences"; static const char __pyx_k_NumpyOps_backprop_mean_pool[] = "NumpyOps.backprop_mean_pool"; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_Ops_backprop_softmax_sequences[] = "Ops.backprop_softmax_sequences"; static const char __pyx_k_strided_and_direct_or_indirect[] = ""; static const char __pyx_k_Softmax_currently_only_supports[] = "Softmax currently only supports 2d. ndim=%d"; static const char __pyx_k_T_grad_T_lr_T_one_minus_beta1_T[] = "T grad, T lr, T one_minus_beta1, T one_minus_beta2, T eps"; static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_Ops_dropout_locals_wrap_backprop[] = "Ops.dropout..wrap_backprop..finish_update"; static const char __pyx_k_Ops_dropout_sequences_locals_lam[] = "Ops.dropout_sequences.."; static const char __pyx_k_Ops_dropout_sequences_locals_wra[] = "Ops.dropout_sequences..wrap_backprop..finish_update"; static const char __pyx_k_Ops_square_sequences_locals_unpa[] = "Ops.square_sequences..unpad"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_m_one_minus_beta1_grad_m_v_one_m[] = "m += one_minus_beta1 * (grad - m);\n v += one_minus_beta2 * (grad * grad - v);\n param -= lr * m / (sqrt(v) + eps);"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static const char 
__pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static const char __pyx_k_Ops_dropout_locals_wrap_backprop_2[] = "Ops.dropout.<locals>.wrap_backprop"; static const char __pyx_k_Ops_dropout_sequences_locals_wra_2[] = "Ops.dropout_sequences.<locals>.wrap_backprop"; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_n_s_B; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_n_s_C; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_CupyOps; static PyObject *__pyx_n_s_CupyOps_adam; static PyObject *__pyx_n_s_CupyOps_asarray; static PyObject *__pyx_n_s_CupyOps_backprop_max_pool; static PyObject *__pyx_n_s_CupyOps_backprop_maxout; static PyObject *__pyx_n_s_CupyOps_backprop_mean_pool; static PyObject *__pyx_n_s_CupyOps_backprop_mish; static PyObject *__pyx_n_s_CupyOps_backprop_relu; static PyObject *__pyx_n_s_CupyOps_backprop_selu; static PyObject *__pyx_n_s_CupyOps_backprop_seq2col; static PyObject *__pyx_n_s_CupyOps_backprop_sum_pool; static PyObject *__pyx_n_s_CupyOps_clip_gradient; static PyObject *__pyx_n_s_CupyOps_gemm; static PyObject *__pyx_n_s_CupyOps_hash; static PyObject *__pyx_n_s_CupyOps_matmul; static PyObject *__pyx_n_s_CupyOps_max_pool; static PyObject *__pyx_n_s_CupyOps_maxout; static PyObject *__pyx_n_s_CupyOps_mean_pool; static PyObject *__pyx_n_s_CupyOps_mish; static PyObject *__pyx_n_s_CupyOps_normal_init; static PyObject *__pyx_n_s_CupyOps_position_encode; static PyObject *__pyx_n_s_CupyOps_relu; static PyObject *__pyx_n_s_CupyOps_scatter_add; static PyObject *__pyx_n_s_CupyOps_selu; static PyObject *__pyx_n_s_CupyOps_seq2col; static PyObject *__pyx_n_s_CupyOps_sum_pool; static PyObject *__pyx_n_s_D; static PyObject *__pyx_n_s_ElementwiseKernel; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_n_s_I; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_n_s_MemoryPointer; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_s_N; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_NotImplementedError; static PyObject *__pyx_n_s_NumpyOps; static PyObject *__pyx_n_s_NumpyOps_adam; static PyObject *__pyx_n_s_NumpyOps_add_sum; static PyObject *__pyx_n_s_NumpyOps_affine; static PyObject *__pyx_n_s_NumpyOps_allocate; static PyObject *__pyx_n_s_NumpyOps_asarray; static PyObject *__pyx_n_s_NumpyOps_backprop_elu; static PyObject *__pyx_n_s_NumpyOps_backprop_max_pool; static PyObject *__pyx_n_s_NumpyOps_backprop_maxout; static PyObject *__pyx_n_s_NumpyOps_backprop_mean_pool; static PyObject *__pyx_n_s_NumpyOps_backprop_mish; static PyObject *__pyx_n_s_NumpyOps_backprop_relu; static PyObject *__pyx_n_s_NumpyOps_backprop_selu; static PyObject *__pyx_n_s_NumpyOps_backprop_seq2col; static
PyObject *__pyx_n_s_NumpyOps_backprop_sum_pool; static PyObject *__pyx_n_s_NumpyOps_elu; static PyObject *__pyx_n_s_NumpyOps_gemm; static PyObject *__pyx_n_s_NumpyOps_hash; static PyObject *__pyx_n_s_NumpyOps_increment_slices; static PyObject *__pyx_n_s_NumpyOps_inplace_add; static PyObject *__pyx_n_s_NumpyOps_matmul; static PyObject *__pyx_n_s_NumpyOps_max_pool; static PyObject *__pyx_n_s_NumpyOps_maxout; static PyObject *__pyx_n_s_NumpyOps_mean_pool; static PyObject *__pyx_n_s_NumpyOps_mish; static PyObject *__pyx_n_s_NumpyOps_ngrams; static PyObject *__pyx_n_s_NumpyOps_position_encode; static PyObject *__pyx_n_s_NumpyOps_relu; static PyObject *__pyx_n_s_NumpyOps_remap_ids; static PyObject *__pyx_n_s_NumpyOps_scatter_add; static PyObject *__pyx_n_s_NumpyOps_selu; static PyObject *__pyx_n_s_NumpyOps_seq2col; static PyObject *__pyx_n_s_NumpyOps_sum_pool; static PyObject *__pyx_n_b_O; static PyObject *__pyx_n_s_O; static PyObject *__pyx_n_s_Ops; static PyObject *__pyx_n_s_Ops___init; static PyObject *__pyx_n_s_Ops_adam; static PyObject *__pyx_n_s_Ops_add_batch_outer; static PyObject *__pyx_n_s_Ops_add_sum; static PyObject *__pyx_n_s_Ops_affine; static PyObject *__pyx_n_s_Ops_allocate; static PyObject *__pyx_n_s_Ops_argmax; static PyObject *__pyx_n_s_Ops_asarray; static PyObject *__pyx_n_s_Ops_backprop_lstm; static PyObject *__pyx_n_s_Ops_backprop_mish; static PyObject *__pyx_n_s_Ops_backprop_seq2col; static PyObject *__pyx_n_s_Ops_backprop_softmax; static PyObject *__pyx_n_s_Ops_backprop_softmax_sequences; static PyObject *__pyx_n_s_Ops_backprop_softplus; static PyObject *__pyx_n_s_Ops_backprop_take; static PyObject *__pyx_n_s_Ops_batch_dot; static PyObject *__pyx_n_s_Ops_clip_gradient; static PyObject *__pyx_n_s_Ops_clip_low; static PyObject *__pyx_n_s_Ops_dot; static PyObject *__pyx_n_s_Ops_dropout; static PyObject *__pyx_n_s_Ops_dropout_locals_lambda; static PyObject *__pyx_n_s_Ops_dropout_locals_wrap_backprop; static PyObject *__pyx_n_s_Ops_dropout_locals_wrap_backprop_2; static PyObject *__pyx_n_s_Ops_dropout_sequences; static PyObject *__pyx_n_s_Ops_dropout_sequences_locals_lam; static PyObject *__pyx_n_s_Ops_dropout_sequences_locals_wra; static PyObject *__pyx_n_s_Ops_dropout_sequences_locals_wra_2; static PyObject *__pyx_n_s_Ops_dsigmoid; static PyObject *__pyx_n_s_Ops_dtanh; static PyObject *__pyx_n_s_Ops_expand_dims; static PyObject *__pyx_n_s_Ops_flatten; static PyObject *__pyx_n_s_Ops_get_dropout_mask; static PyObject *__pyx_n_s_Ops_he_normal_init; static PyObject *__pyx_n_s_Ops_logloss; static PyObject *__pyx_n_s_Ops_lstm; static PyObject *__pyx_n_s_Ops_mish; static PyObject *__pyx_n_s_Ops_norm; static PyObject *__pyx_n_s_Ops_normal_init; static PyObject *__pyx_n_s_Ops_seq2col; static PyObject *__pyx_n_s_Ops_sigmoid; static PyObject *__pyx_n_s_Ops_softmax; static PyObject *__pyx_n_s_Ops_softmax_sequences; static PyObject *__pyx_n_s_Ops_softplus; static PyObject *__pyx_n_s_Ops_square_sequences; static PyObject *__pyx_n_s_Ops_square_sequences_locals_unpa; static PyObject *__pyx_n_s_Ops_take_which; static PyObject *__pyx_n_s_Ops_unflatten; static PyObject *__pyx_n_s_Ops_unzip; static PyObject *__pyx_n_s_Ops_update_averages; static PyObject *__pyx_n_s_Ops_xavier_uniform_init; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_P; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_Sized; static PyObject *__pyx_kp_s_Softmax_currently_only_supports; static PyObject *__pyx_n_s_T; static 
PyObject *__pyx_kp_s_T_grad_T_lr_T_one_minus_beta1_T; static PyObject *__pyx_kp_s_T_param_T_m_T_v; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_W; static PyObject *__pyx_n_s_X; static PyObject *__pyx_n_s_Xs; static PyObject *__pyx_n_s_Xsoft; static PyObject *__pyx_n_s_Xsub; static PyObject *__pyx_n_s_Y; static PyObject *__pyx_n_s_Y_ptr; static PyObject *__pyx_n_s__144; static PyObject *__pyx_n_s_a; static PyObject *__pyx_n_s_act_pieces; static PyObject *__pyx_n_s_adam; static PyObject *__pyx_n_s_add; static PyObject *__pyx_n_s_add_batch_outer; static PyObject *__pyx_n_s_add_gradient_noise; static PyObject *__pyx_n_s_add_sum; static PyObject *__pyx_n_s_affine; static PyObject *__pyx_n_s_aligned_alloc; static PyObject *__pyx_n_s_allocate; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_alpha; static PyObject *__pyx_n_s_argmax; static PyObject *__pyx_n_s_args; static PyObject *__pyx_n_s_arr; static PyObject *__pyx_n_s_arr_i; static PyObject *__pyx_n_s_array; static PyObject *__pyx_n_s_asarray; static PyObject *__pyx_n_s_ascontiguousarray; static PyObject *__pyx_n_s_at; static PyObject *__pyx_n_s_axes; static PyObject *__pyx_n_s_axis; static PyObject *__pyx_n_s_backprop; static PyObject *__pyx_n_s_backprop_elu; static PyObject *__pyx_n_s_backprop_lstm; static PyObject *__pyx_n_s_backprop_max_pool; static PyObject *__pyx_n_s_backprop_maxout; static PyObject *__pyx_n_s_backprop_mean_pool; static PyObject *__pyx_n_s_backprop_mish; static PyObject *__pyx_n_s_backprop_relu; static PyObject *__pyx_n_s_backprop_selu; static PyObject *__pyx_n_s_backprop_seq2col; static PyObject *__pyx_n_s_backprop_softmax; static PyObject *__pyx_n_s_backprop_softmax_sequences; static PyObject *__pyx_n_s_backprop_softplus; static PyObject *__pyx_n_s_backprop_sum_pool; static PyObject *__pyx_n_s_backprop_take; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_batch_dot; static PyObject *__pyx_n_s_batch_size_at_t; static PyObject *__pyx_n_s_best; static PyObject *__pyx_n_s_beta1; static PyObject *__pyx_n_s_beta2; static PyObject *__pyx_n_s_bias; static PyObject *__pyx_n_s_blis; static PyObject *__pyx_n_s_blis_py; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_c_contiguous; static PyObject *__pyx_n_s_cells; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_clip; static PyObject *__pyx_n_s_clip_gradient; static PyObject *__pyx_n_s_clip_low; static PyObject *__pyx_n_s_coinflips; static PyObject *__pyx_n_s_collections; static PyObject *__pyx_n_s_collections_abc; static PyObject *__pyx_n_s_cols; static PyObject *__pyx_n_s_compat; static PyObject *__pyx_n_s_compile_with_cache; static PyObject *__pyx_n_s_concatenate; static PyObject *__pyx_n_s_contig_array; static PyObject *__pyx_n_s_contig_starts; static PyObject *__pyx_n_s_contig_to_add; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_copy; static PyObject *__pyx_n_s_copy_array; static PyObject *__pyx_n_s_cpu; static PyObject *__pyx_n_s_cpu_clip_gradient; static PyObject *__pyx_n_s_ct; static PyObject *__pyx_n_s_cuda; static PyObject *__pyx_n_s_cupy; static PyObject *__pyx_n_s_cupy_cuda; static PyObject *__pyx_n_s_cupy_cuda_compiler; static PyObject *__pyx_n_s_custom_kernels; static PyObject 
*__pyx_n_s_dX; static PyObject *__pyx_n_s_dX__bo; static PyObject *__pyx_n_s_dX__bop; static PyObject *__pyx_n_s_dX_ptr; static PyObject *__pyx_n_s_dXsub; static PyObject *__pyx_n_s_dY; static PyObject *__pyx_n_s_dYsub; static PyObject *__pyx_n_s_d_cells; static PyObject *__pyx_n_s_d_gate_pieces; static PyObject *__pyx_n_s_d_maxes; static PyObject *__pyx_n_s_d_means; static PyObject *__pyx_n_s_d_output; static PyObject *__pyx_n_s_d_prev; static PyObject *__pyx_n_s_d_sums; static PyObject *__pyx_n_s_data; static PyObject *__pyx_n_s_data_ptr; static PyObject *__pyx_n_s_dc; static PyObject *__pyx_n_s_decay; static PyObject *__pyx_n_s_delta; static PyObject *__pyx_n_s_delta_2; static PyObject *__pyx_n_s_dest; static PyObject *__pyx_n_s_device; static PyObject *__pyx_n_s_dhc; static PyObject *__pyx_n_s_dhf; static PyObject *__pyx_n_s_dhi; static PyObject *__pyx_n_s_dho; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_doc; static PyObject *__pyx_n_s_dot; static PyObject *__pyx_n_s_dotted; static PyObject *__pyx_n_s_drop; static PyObject *__pyx_n_s_dropout; static PyObject *__pyx_n_s_dropout_sequences; static PyObject *__pyx_n_s_dsigmoid; static PyObject *__pyx_n_s_dtanh; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_dx; static PyObject *__pyx_n_s_dy; static PyObject *__pyx_n_s_elu; static PyObject *__pyx_n_s_ema; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_entropy; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_eps; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_exp; static PyObject *__pyx_n_s_expand_dims; static PyObject *__pyx_n_s_extra_dims; static PyObject *__pyx_n_s_f; static PyObject *__pyx_n_s_fan_in; static PyObject *__pyx_n_s_fill; static PyObject *__pyx_n_s_finish_update; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_flatten; static PyObject *__pyx_n_s_float32; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_gate_pieces; static PyObject *__pyx_n_s_gemm; static PyObject *__pyx_n_s_get; static PyObject *__pyx_n_s_get_array_module; static PyObject *__pyx_n_s_get_dropout_mask; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_gpu; static PyObject *__pyx_n_s_grad_norm; static PyObject *__pyx_n_s_gradient; static PyObject *__pyx_n_s_hash; static PyObject *__pyx_n_s_hc; static PyObject *__pyx_n_s_he_normal_init; static PyObject *__pyx_n_s_hf; static PyObject *__pyx_n_s_hi; static PyObject *__pyx_n_s_ho; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_ids; static PyObject *__pyx_n_s_ids_mv; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_in_size; static PyObject *__pyx_n_s_increment_slices; static PyObject *__pyx_n_s_indices; static PyObject *__pyx_n_s_init; static PyObject *__pyx_n_s_inits; static PyObject *__pyx_n_s_inplace; static PyObject *__pyx_n_s_inplace_add; static PyObject *__pyx_n_s_inputs; static PyObject *__pyx_n_s_int32; static PyObject *__pyx_n_s_integer_types; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_keepdims; static PyObject *__pyx_n_s_key; static PyObject *__pyx_n_s_keys; static PyObject *__pyx_n_s_keys_2; static PyObject *__pyx_n_s_kwargs; static PyObject *__pyx_n_s_learn_rate; static PyObject *__pyx_n_s_length; static 
PyObject *__pyx_n_s_lengths; static PyObject *__pyx_n_s_lengths_indices; static PyObject *__pyx_n_s_linalg; static PyObject *__pyx_n_s_loc; static PyObject *__pyx_n_s_log; static PyObject *__pyx_n_s_log1p; static PyObject *__pyx_n_s_log1p_exp; static PyObject *__pyx_n_s_log_yp; static PyObject *__pyx_n_s_logloss; static PyObject *__pyx_n_s_loss; static PyObject *__pyx_n_s_lstm; static PyObject *__pyx_n_s_m; static PyObject *__pyx_kp_s_m_one_minus_beta1_grad_m_v_one_m; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mapped; static PyObject *__pyx_n_s_mapping; static PyObject *__pyx_n_s_mask; static PyObject *__pyx_n_s_masked; static PyObject *__pyx_n_s_masks; static PyObject *__pyx_n_s_matmul; static PyObject *__pyx_n_s_max; static PyObject *__pyx_n_s_max_decay; static PyObject *__pyx_n_s_max_pool; static PyObject *__pyx_n_s_maxes; static PyObject *__pyx_n_s_maximum; static PyObject *__pyx_n_s_maxout; static PyObject *__pyx_n_s_mean_pool; static PyObject *__pyx_n_s_means; static PyObject *__pyx_n_s_mem; static PyObject *__pyx_n_s_memptr; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_metaclass; static PyObject *__pyx_n_s_mish; static PyObject *__pyx_n_s_mod_rate; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_module; static PyObject *__pyx_n_s_mom1; static PyObject *__pyx_n_s_mom2; static PyObject *__pyx_n_s_n; static PyObject *__pyx_n_s_nB; static PyObject *__pyx_n_s_nF; static PyObject *__pyx_n_s_nP; static PyObject *__pyx_n_s_nS; static PyObject *__pyx_n_s_nW; static PyObject *__pyx_n_s_n_items; static PyObject *__pyx_n_s_n_slice; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndarray; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_n_s_new_x; static PyObject *__pyx_n_s_ngrams; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_noise_level; static PyObject *__pyx_n_s_norm; static PyObject *__pyx_n_s_normal; static PyObject *__pyx_n_s_normal_init; static PyObject *__pyx_n_s_nr_weight; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_object; static PyObject *__pyx_n_s_omega; static PyObject *__pyx_kp_s_ops_pyx; static PyObject *__pyx_n_s_order; static PyObject *__pyx_n_s_out; static PyObject *__pyx_n_s_out_2; static PyObject *__pyx_n_s_out_array; static PyObject *__pyx_n_s_output; static PyObject *__pyx_n_s_output_2; static PyObject *__pyx_n_s_output_arr; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pad; static PyObject *__pyx_n_s_padded; static PyObject *__pyx_n_s_period; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_pointer; static PyObject *__pyx_n_s_position_encode; static PyObject *__pyx_n_s_positions; static PyObject *__pyx_n_s_prepare; static PyObject *__pyx_n_s_prev; static PyObject *__pyx_n_s_prod; static PyObject *__pyx_n_s_py; static PyObject *__pyx_n_s_py_best; static PyObject *__pyx_n_s_py_cands; static PyObject *__pyx_n_s_py_which; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject 
*__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_qualname; static PyObject *__pyx_n_s_random; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_relu; static PyObject *__pyx_n_s_remap_ids; static PyObject *__pyx_n_s_reshape; static PyObject *__pyx_n_s_result; static PyObject *__pyx_n_s_reverse; static PyObject *__pyx_n_s_scale; static PyObject *__pyx_n_s_scatter_add; static PyObject *__pyx_n_s_seed; static PyObject *__pyx_n_s_self; static PyObject *__pyx_n_s_selu; static PyObject *__pyx_n_s_seq; static PyObject *__pyx_n_s_seq2col; static PyObject *__pyx_n_s_seqs; static PyObject *__pyx_n_s_seqs_i; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_shifted; static PyObject *__pyx_n_s_sigmoid; static PyObject *__pyx_n_s_signal; static PyObject *__pyx_n_s_signal_in; static PyObject *__pyx_n_s_signal_in_2; static PyObject *__pyx_n_s_signal_out; static PyObject *__pyx_n_s_signal_out_2; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_softmax; static PyObject *__pyx_n_s_softmax_sequences; static PyObject *__pyx_n_s_softplus; static PyObject *__pyx_n_s_sort; static PyObject *__pyx_n_s_sqrt; static PyObject *__pyx_n_s_square_sequences; static PyObject *__pyx_n_s_src; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_starts; static PyObject *__pyx_n_s_starts_2; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_n_s_stride; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_sum; static PyObject *__pyx_n_s_sum_pool; static PyObject *__pyx_n_s_sumdx; static PyObject *__pyx_n_s_summed; static PyObject *__pyx_n_s_sums; static PyObject *__pyx_n_s_t; static PyObject *__pyx_n_s_take_which; static PyObject *__pyx_n_s_tanh; static PyObject *__pyx_n_s_tensordot; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_thinc_neural_ops; static PyObject *__pyx_n_s_threshold; static PyObject *__pyx_n_s_timestep; static PyObject *__pyx_n_s_to_add; static PyObject *__pyx_n_s_to_add_2; static PyObject *__pyx_n_s_to_sum; static PyObject *__pyx_n_s_trans1; static PyObject *__pyx_n_s_trans2; static PyObject *__pyx_n_s_transpose; static PyObject *__pyx_n_s_uint32; static PyObject *__pyx_n_s_uint64; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unflat; static PyObject *__pyx_n_s_unflatten; static PyObject *__pyx_n_s_uniform; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_unpad; static PyObject *__pyx_n_s_unpadded; static PyObject *__pyx_n_s_unzip; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_update_averages; static PyObject *__pyx_n_s_util; static PyObject *__pyx_n_s_value; static PyObject *__pyx_n_s_variance; static PyObject *__pyx_n_s_weights; static PyObject *__pyx_n_s_where; static PyObject *__pyx_n_s_which; static PyObject *__pyx_n_s_which__bo; static PyObject *__pyx_n_s_whole_array; static PyObject *__pyx_n_s_workon; static PyObject *__pyx_n_s_wrap_backprop; static PyObject *__pyx_n_s_x; static 
PyObject *__pyx_n_s_xavier_uniform_init; static PyObject *__pyx_n_s_xp; static PyObject *__pyx_n_s_y; static PyObject *__pyx_n_s_y_pred; static PyObject *__pyx_n_s_y_true; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_n_s_zeros_aligned; static PyObject *__pyx_n_s_zip; static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_xp); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_2seq2col(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_seq, int __pyx_v_nW); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_4backprop_seq2col(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_dY, int __pyx_v_nW); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_17dropout_sequences_13wrap_backprop_finish_update(PyObject *__pyx_self, PyObject *__pyx_v_gradient, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_17dropout_sequences_wrap_backprop(PyObject *__pyx_self, PyObject *__pyx_v_backprop); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_6dropout_sequences(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_dropout, PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_7dropout_13wrap_backprop_finish_update(PyObject *__pyx_self, PyObject *__pyx_v_gradient, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_7dropout_wrap_backprop(PyObject *__pyx_self, PyObject *__pyx_v_backprop); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda2(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_8dropout(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_dropout, PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_10flatten(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_dtype, PyObject *__pyx_v_pad); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_12unflatten(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_lengths, PyObject *__pyx_v_pad); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_16square_sequences_unpad(PyObject *__pyx_self, PyObject *__pyx_v_padded); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_14square_sequences(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_seqs); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_16get_dropout_mask(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_shape, PyObject *__pyx_v_drop); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_18allocate(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_shape, PyObject *__pyx_v_dtype); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_20unzip(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data); /* proto */ static PyObject 
*__pyx_pf_5thinc_6neural_3ops_3Ops_22asarray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data, PyObject *__pyx_v_dtype); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_24batch_dot(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_y, PyObject *__pyx_v_transpose); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_26add_batch_outer(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_output, PyObject *__pyx_v_x, PyObject *__pyx_v_y); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_28norm(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_30dot(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_y); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_32affine(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_weights, PyObject *__pyx_v_bias, PyObject *__pyx_v_signal); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_34add_sum(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_out, PyObject *__pyx_v_to_sum); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_36argmax(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_axis); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_38sigmoid(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_40dsigmoid(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_y); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_42dtanh(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_y); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_44softmax(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_inplace, PyObject *__pyx_v_axis); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_46softmax_sequences(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_Xs, PyObject *__pyx_v_lengths, PyObject *__pyx_v_inplace, CYTHON_UNUSED PyObject *__pyx_v_axis); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_48backprop_softmax(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_Y, PyObject *__pyx_v_dY, PyObject *__pyx_v_axis); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_50backprop_softmax_sequences(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_dy, PyObject *__pyx_v_y, PyObject *__pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_52expand_dims(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_a, PyObject *__pyx_v_axis); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_54clip_low(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_value, PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_56take_which(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_which, PyObject *__pyx_v_axis); /* proto */ static PyObject 
*__pyx_pf_5thinc_6neural_3ops_3Ops_58backprop_take(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_dX__bo, PyObject *__pyx_v_which__bo, PyObject *__pyx_v_nP); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_60lstm(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_output, PyObject *__pyx_v_cells, PyObject *__pyx_v_act_pieces, PyObject *__pyx_v_prev); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_62backprop_lstm(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_d_cells, PyObject *__pyx_v_d_prev, PyObject *__pyx_v_d_gate_pieces, PyObject *__pyx_v_d_output, PyObject *__pyx_v_gate_pieces, PyObject *__pyx_v_cells, PyObject *__pyx_v_prev); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_64softplus(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_66backprop_softplus(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_dY, PyObject *__pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_68mish(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_70backprop_mish(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_dY, PyObject *__pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_72xavier_uniform_init(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_W, PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_74normal_init(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_W, PyObject *__pyx_v_fan_in, PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_76he_normal_init(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_shape, PyObject *__pyx_v_fan_in); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_78update_averages(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_ema, PyObject *__pyx_v_weights, PyObject *__pyx_v_t, PyObject *__pyx_v_max_decay); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_80adam(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_weights, PyObject *__pyx_v_gradient, PyObject *__pyx_v_mom1, PyObject *__pyx_v_mom2, PyObject *__pyx_v_beta1, PyObject *__pyx_v_beta2, PyObject *__pyx_v_eps, PyObject *__pyx_v_learn_rate, PyObject *__pyx_v_mod_rate); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_82clip_gradient(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_gradient, PyObject *__pyx_v_threshold); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_84logloss(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_y_true, PyObject *__pyx_v_y_pred); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_asarray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data, PyObject *__pyx_v_dtype); /* proto */ static PyObject 
*__pyx_pf_5thinc_6neural_3ops_8NumpyOps_2allocate(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_shape, PyObject *__pyx_v_dtype); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_4inplace_add(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, float __pyx_v_scale); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_6matmul(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_x, __Pyx_memviewslice __pyx_v_y, PyObject *__pyx_v_out); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_8gemm(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_x, __Pyx_memviewslice __pyx_v_y, PyObject *__pyx_v_trans1, PyObject *__pyx_v_trans2, PyObject *__pyx_v_out); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_10affine(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_weights, PyObject *__pyx_v_bias, PyObject *__pyx_v_signal); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_12elu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_X, CYTHON_UNUSED PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_14selu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_X, CYTHON_UNUSED PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_16backprop_selu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_delta_, PyArrayObject *__pyx_v_signal_in_, CYTHON_UNUSED PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_18backprop_elu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_delta_, PyArrayObject *__pyx_v_signal_out_, CYTHON_UNUSED PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_20relu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_X, PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_22backprop_relu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_dY, PyArrayObject *__pyx_v_Y, PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_24maxout(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_py_cands); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_26backprop_maxout(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_dX__bo, __Pyx_memviewslice __pyx_v_which__bo, int __pyx_v_P); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_28mish(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_30backprop_mish(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_dY, __Pyx_memviewslice __pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_32seq2col(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, 
__Pyx_memviewslice __pyx_v_seq, int __pyx_v_nW); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_34backprop_seq2col(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_dY, int __pyx_v_nW); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_36remap_ids(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, struct __pyx_obj_7preshed_4maps_PreshMap *__pyx_v_mapping, __Pyx_memviewslice __pyx_v_ids_mv, uint64_t __pyx_v_value); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_38increment_slices(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyArrayObject *__pyx_v_contig_array, PyArrayObject *__pyx_v__to_add, PyObject *__pyx_v__starts); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_40hash(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_ids, uint32_t __pyx_v_seed); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_42mean_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_44sum_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_46backprop_mean_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_d_means, __Pyx_memviewslice __pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_48backprop_sum_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_d_sums, __Pyx_memviewslice __pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_50max_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_52backprop_max_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_d_maxes, __Pyx_memviewslice __pyx_v_which, __Pyx_memviewslice __pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_54add_sum(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_out, PyArrayObject *__pyx_v_to_sum); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_56scatter_add(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyArrayObject *__pyx_v_out, PyArrayObject *__pyx_v_ids, PyArrayObject *__pyx_v_inputs); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_58adam(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_gradient, __Pyx_memviewslice __pyx_v_mom1, __Pyx_memviewslice __pyx_v_mom2, float __pyx_v_beta1, float __pyx_v_beta2, float __pyx_v_eps, float __pyx_v_learn_rate, CYTHON_UNUSED float __pyx_v_mod_rate); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_60ngrams(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, int __pyx_v_n, __Pyx_memviewslice __pyx_v_keys_); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_62position_encode(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, int __pyx_v_N, int 
__pyx_v_D, int __pyx_v_period, PyObject *__pyx_v_out); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_matmul(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_y, PyObject *__pyx_v_out); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_2gemm(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_y, PyObject *__pyx_v_out, PyObject *__pyx_v_trans1, PyObject *__pyx_v_trans2); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_4asarray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_dtype); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_6maxout(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_8backprop_maxout(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_dY, PyObject *__pyx_v_which, int __pyx_v_P); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_10relu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_12backprop_relu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_delta_, PyObject *__pyx_v_signal_out, PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_14selu(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_16backprop_selu(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_delta, PyObject *__pyx_v_signal_in, PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_18mish(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_20backprop_mish(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_dY, PyObject *__pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_22clip_gradient(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_gradient, PyObject *__pyx_v_threshold); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_24seq2col(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_seq, int __pyx_v_nW); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_26backprop_seq2col(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_dY, int __pyx_v_nW); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_28mean_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_30backprop_mean_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_d_means, PyObject *__pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_32max_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED 
PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_34backprop_max_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_d_maxes, PyObject *__pyx_v_which, PyObject *__pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_36sum_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_38backprop_sum_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_d_sums, PyObject *__pyx_v_lengths); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_40hash(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_ids, uint64_t __pyx_v_seed); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_42scatter_add(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_out, PyObject *__pyx_v_ids, PyObject *__pyx_v_inputs); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_44adam(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_weights, PyObject *__pyx_v_gradient, PyObject *__pyx_v_mom1, PyObject *__pyx_v_mom2, PyObject *__pyx_v_beta1, PyObject *__pyx_v_beta2, PyObject *__pyx_v_eps, PyObject *__pyx_v_learn_rate, CYTHON_UNUSED PyObject *__pyx_v_mod_rate); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_46normal_init(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_W, PyObject *__pyx_v_fan_in, PyObject *__pyx_v_inplace); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_48position_encode(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_cpu_clip_gradient(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_gradient, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_threshold); /* proto */ static PyObject *__pyx_pf_5thinc_6neural_3ops_2add_gradient_noise(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_gradient, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_noise_level, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_timestep); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); 
/* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_2_dropout(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_float_0_; static PyObject *__pyx_float_1_; static PyObject *__pyx_float_2_; static PyObject *__pyx_float_4_; static PyObject *__pyx_float_6_; static PyObject *__pyx_float_0_0; static PyObject *__pyx_float_1_0; static PyObject *__pyx_float_20_; static PyObject *__pyx_float_10_0; static PyObject *__pyx_float_1eneg_8; static PyObject *__pyx_float_0_9999; static PyObject *__pyx_float_neg_20_; static PyObject *__pyx_int_0; static 
PyObject *__pyx_int_1; static PyObject *__pyx_int_2; static PyObject *__pyx_int_3; static PyObject *__pyx_int_4; static PyObject *__pyx_int_5; static PyObject *__pyx_int_20; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_codeobj_; static PyObject *__pyx_slice__3; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__8; static PyObject *__pyx_slice__18; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__22; static PyObject *__pyx_codeobj__2; static PyObject *__pyx_codeobj__4; static PyObject *__pyx_codeobj__6; static PyObject *__pyx_codeobj__7; static PyObject *__pyx_codeobj__9; static PyObject *__pyx_tuple__114; static PyObject *__pyx_tuple__119; static PyObject *__pyx_tuple__120; static PyObject *__pyx_tuple__121; static PyObject *__pyx_tuple__122; static PyObject *__pyx_tuple__123; static PyObject *__pyx_tuple__124; static PyObject *__pyx_tuple__125; static PyObject *__pyx_tuple__126; static PyObject *__pyx_tuple__127; static PyObject *__pyx_tuple__128; static PyObject *__pyx_tuple__129; static PyObject *__pyx_tuple__130; static PyObject *__pyx_tuple__131; static PyObject *__pyx_tuple__132; static PyObject *__pyx_tuple__133; static PyObject *__pyx_tuple__134; static PyObject *__pyx_tuple__135; static PyObject *__pyx_tuple__136; static PyObject *__pyx_tuple__137; static PyObject *__pyx_tuple__138; static PyObject *__pyx_tuple__139; static PyObject *__pyx_tuple__140; static PyObject *__pyx_tuple__141; static PyObject *__pyx_tuple__142; static PyObject *__pyx_tuple__145; static PyObject *__pyx_tuple__146; static PyObject *__pyx_tuple__147; static PyObject *__pyx_tuple__148; static PyObject *__pyx_tuple__149; static PyObject *__pyx_tuple__150; static PyObject *__pyx_tuple__151; static PyObject *__pyx_tuple__152; static PyObject *__pyx_tuple__153; static PyObject *__pyx_tuple__154; static PyObject *__pyx_tuple__155; static PyObject *__pyx_tuple__156; static PyObject *__pyx_tuple__157; static PyObject *__pyx_tuple__158; static PyObject *__pyx_tuple__159; static PyObject *__pyx_tuple__160; static PyObject *__pyx_tuple__161; static PyObject *__pyx_tuple__162; static PyObject *__pyx_tuple__163; static PyObject *__pyx_tuple__164; static PyObject *__pyx_tuple__165; static PyObject *__pyx_tuple__166; static PyObject *__pyx_tuple__167; static PyObject *__pyx_tuple__168; static PyObject *__pyx_tuple__169; static PyObject *__pyx_tuple__170; static PyObject *__pyx_tuple__171; static PyObject *__pyx_tuple__172; static PyObject *__pyx_tuple__173; static PyObject *__pyx_tuple__174; static PyObject *__pyx_tuple__175; static PyObject *__pyx_tuple__176; static PyObject *__pyx_tuple__177; static PyObject *__pyx_tuple__178; static PyObject *__pyx_tuple__179; static PyObject *__pyx_tuple__180; static PyObject *__pyx_tuple__181; static PyObject *__pyx_tuple__182; static PyObject *__pyx_tuple__183; static PyObject *__pyx_tuple__184; static PyObject *__pyx_tuple__185; static PyObject *__pyx_tuple__186; static PyObject *__pyx_tuple__187; static PyObject *__pyx_tuple__188; static PyObject *__pyx_tuple__189; static PyObject *__pyx_tuple__190; static PyObject *__pyx_tuple__191; static PyObject *__pyx_tuple__192; static PyObject *__pyx_tuple__193; static PyObject *__pyx_tuple__194; static PyObject *__pyx_tuple__195; static PyObject *__pyx_tuple__196; static PyObject *__pyx_tuple__197; static PyObject 
*__pyx_tuple__198; static PyObject *__pyx_tuple__199; static PyObject *__pyx_tuple__200; static PyObject *__pyx_tuple__201; static PyObject *__pyx_tuple__202; static PyObject *__pyx_tuple__203; static PyObject *__pyx_tuple__204; static PyObject *__pyx_tuple__205; static PyObject *__pyx_tuple__206; static PyObject *__pyx_tuple__207; static PyObject *__pyx_tuple__208; static PyObject *__pyx_tuple__209; static PyObject *__pyx_tuple__210; static PyObject *__pyx_tuple__211; static PyObject *__pyx_tuple__212; static PyObject *__pyx_tuple__213; static PyObject *__pyx_tuple__214; static PyObject *__pyx_tuple__215; static PyObject *__pyx_tuple__216; static PyObject *__pyx_tuple__217; static PyObject *__pyx_tuple__218; static PyObject *__pyx_tuple__219; static PyObject *__pyx_tuple__220; static PyObject *__pyx_tuple__221; static PyObject *__pyx_tuple__222; static PyObject *__pyx_tuple__223; static PyObject *__pyx_tuple__224; static PyObject *__pyx_tuple__225; static PyObject *__pyx_tuple__226; static PyObject *__pyx_tuple__227; static PyObject *__pyx_tuple__228; static PyObject *__pyx_tuple__229; static PyObject *__pyx_tuple__230; static PyObject *__pyx_tuple__231; static PyObject *__pyx_tuple__232; static PyObject *__pyx_tuple__233; static PyObject *__pyx_tuple__234; static PyObject *__pyx_tuple__235; static PyObject *__pyx_tuple__236; static PyObject *__pyx_tuple__237; static PyObject *__pyx_tuple__238; static PyObject *__pyx_tuple__239; static PyObject *__pyx_tuple__240; static PyObject *__pyx_tuple__241; static PyObject *__pyx_tuple__242; static PyObject *__pyx_tuple__243; static PyObject *__pyx_tuple__244; static PyObject *__pyx_tuple__245; static PyObject *__pyx_tuple__246; static PyObject *__pyx_tuple__247; static PyObject *__pyx_tuple__248; static PyObject *__pyx_tuple__249; static PyObject *__pyx_tuple__250; static PyObject *__pyx_tuple__251; static PyObject *__pyx_tuple__252; static PyObject *__pyx_tuple__253; static PyObject *__pyx_tuple__254; static PyObject *__pyx_tuple__255; static PyObject *__pyx_tuple__256; static PyObject *__pyx_tuple__257; static PyObject *__pyx_tuple__258; static PyObject *__pyx_tuple__259; static PyObject *__pyx_tuple__260; static PyObject *__pyx_tuple__261; static PyObject *__pyx_tuple__262; static PyObject *__pyx_tuple__263; static PyObject *__pyx_tuple__264; static PyObject *__pyx_tuple__265; static PyObject *__pyx_tuple__266; static PyObject *__pyx_tuple__267; static PyObject *__pyx_tuple__268; static PyObject *__pyx_tuple__269; static PyObject *__pyx_tuple__270; static PyObject *__pyx_tuple__271; static PyObject *__pyx_tuple__272; static PyObject *__pyx_tuple__273; static PyObject *__pyx_tuple__274; static PyObject *__pyx_tuple__275; static PyObject *__pyx_tuple__276; static PyObject *__pyx_tuple__277; static PyObject *__pyx_tuple__278; static PyObject *__pyx_tuple__279; static PyObject *__pyx_tuple__280; static PyObject *__pyx_tuple__281; static PyObject *__pyx_tuple__282; static PyObject *__pyx_tuple__283; static PyObject *__pyx_tuple__284; static PyObject *__pyx_tuple__285; static PyObject *__pyx_tuple__286; static PyObject *__pyx_tuple__287; static PyObject *__pyx_tuple__288; static PyObject *__pyx_tuple__289; static PyObject *__pyx_tuple__290; static PyObject *__pyx_tuple__291; static PyObject *__pyx_tuple__292; static PyObject *__pyx_tuple__293; static PyObject *__pyx_tuple__294; static PyObject *__pyx_tuple__295; static PyObject *__pyx_tuple__296; static PyObject *__pyx_tuple__297; static PyObject *__pyx_tuple__298; static PyObject *__pyx_tuple__299; 
static PyObject *__pyx_codeobj__11; static PyObject *__pyx_codeobj__12; static PyObject *__pyx_codeobj__14; static PyObject *__pyx_codeobj__15; static PyObject *__pyx_codeobj__19; static PyObject *__pyx_codeobj__21; static PyObject *__pyx_codeobj__23; static PyObject *__pyx_codeobj__24; static PyObject *__pyx_codeobj__25; static PyObject *__pyx_codeobj__26; static PyObject *__pyx_codeobj__27; static PyObject *__pyx_codeobj__28; static PyObject *__pyx_codeobj__29; static PyObject *__pyx_codeobj__30; static PyObject *__pyx_codeobj__31; static PyObject *__pyx_codeobj__32; static PyObject *__pyx_codeobj__33; static PyObject *__pyx_codeobj__34; static PyObject *__pyx_codeobj__35; static PyObject *__pyx_codeobj__36; static PyObject *__pyx_codeobj__37; static PyObject *__pyx_codeobj__38; static PyObject *__pyx_codeobj__39; static PyObject *__pyx_codeobj__40; static PyObject *__pyx_codeobj__41; static PyObject *__pyx_codeobj__42; static PyObject *__pyx_codeobj__43; static PyObject *__pyx_codeobj__44; static PyObject *__pyx_codeobj__45; static PyObject *__pyx_codeobj__46; static PyObject *__pyx_codeobj__47; static PyObject *__pyx_codeobj__48; static PyObject *__pyx_codeobj__49; static PyObject *__pyx_codeobj__50; static PyObject *__pyx_codeobj__51; static PyObject *__pyx_codeobj__52; static PyObject *__pyx_codeobj__53; static PyObject *__pyx_codeobj__54; static PyObject *__pyx_codeobj__55; static PyObject *__pyx_codeobj__56; static PyObject *__pyx_codeobj__57; static PyObject *__pyx_codeobj__58; static PyObject *__pyx_codeobj__59; static PyObject *__pyx_codeobj__60; static PyObject *__pyx_codeobj__61; static PyObject *__pyx_codeobj__62; static PyObject *__pyx_codeobj__63; static PyObject *__pyx_codeobj__64; static PyObject *__pyx_codeobj__65; static PyObject *__pyx_codeobj__66; static PyObject *__pyx_codeobj__67; static PyObject *__pyx_codeobj__68; static PyObject *__pyx_codeobj__69; static PyObject *__pyx_codeobj__70; static PyObject *__pyx_codeobj__71; static PyObject *__pyx_codeobj__72; static PyObject *__pyx_codeobj__73; static PyObject *__pyx_codeobj__74; static PyObject *__pyx_codeobj__75; static PyObject *__pyx_codeobj__76; static PyObject *__pyx_codeobj__77; static PyObject *__pyx_codeobj__78; static PyObject *__pyx_codeobj__79; static PyObject *__pyx_codeobj__80; static PyObject *__pyx_codeobj__81; static PyObject *__pyx_codeobj__82; static PyObject *__pyx_codeobj__83; static PyObject *__pyx_codeobj__84; static PyObject *__pyx_codeobj__85; static PyObject *__pyx_codeobj__86; static PyObject *__pyx_codeobj__87; static PyObject *__pyx_codeobj__88; static PyObject *__pyx_codeobj__89; static PyObject *__pyx_codeobj__90; static PyObject *__pyx_codeobj__91; static PyObject *__pyx_codeobj__92; static PyObject *__pyx_codeobj__93; static PyObject *__pyx_codeobj__94; static PyObject *__pyx_codeobj__95; static PyObject *__pyx_codeobj__96; static PyObject *__pyx_codeobj__97; static PyObject *__pyx_codeobj__98; static PyObject *__pyx_codeobj__99; static PyObject *__pyx_codeobj__100; static PyObject *__pyx_codeobj__101; static PyObject *__pyx_codeobj__102; static PyObject *__pyx_codeobj__103; static PyObject *__pyx_codeobj__104; static PyObject *__pyx_codeobj__105; static PyObject *__pyx_codeobj__106; static PyObject *__pyx_codeobj__107; static PyObject *__pyx_codeobj__108; static PyObject *__pyx_codeobj__109; static PyObject *__pyx_codeobj__110; static PyObject *__pyx_codeobj__111; static PyObject *__pyx_codeobj__112; static PyObject *__pyx_codeobj__113; static PyObject *__pyx_codeobj__115; static 
PyObject *__pyx_codeobj__116; static PyObject *__pyx_codeobj__117; static PyObject *__pyx_codeobj__118; static PyObject *__pyx_codeobj__143; /* Late includes */ /* "thinc/neural/ops.pyx":61 * xp = None * * def __init__(self, xp=None): # <<<<<<<<<<<<<< * if xp is not None: * self.xp = xp */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_1__init__, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_xp = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_xp,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_xp); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 61, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_xp = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 61, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops___init__(__pyx_self, __pyx_v_self, __pyx_v_xp); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_xp) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj_) __Pyx_RefNannySetupContext("__init__", 0); __Pyx_TraceCall("__init__", __pyx_f[0], 61, 0, __PYX_ERR(0, 61, __pyx_L1_error)); /* "thinc/neural/ops.pyx":62 * * def __init__(self, xp=None): * if xp is not None: # <<<<<<<<<<<<<< * self.xp = xp * */ __pyx_t_1 = (__pyx_v_xp != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* 
"thinc/neural/ops.pyx":63 * def __init__(self, xp=None): * if xp is not None: * self.xp = xp # <<<<<<<<<<<<<< * * def seq2col(self, seq, int nW): */ if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_xp, __pyx_v_xp) < 0) __PYX_ERR(0, 63, __pyx_L1_error) /* "thinc/neural/ops.pyx":62 * * def __init__(self, xp=None): * if xp is not None: # <<<<<<<<<<<<<< * self.xp = xp * */ } /* "thinc/neural/ops.pyx":61 * xp = None * * def __init__(self, xp=None): # <<<<<<<<<<<<<< * if xp is not None: * self.xp = xp */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":65 * self.xp = xp * * def seq2col(self, seq, int nW): # <<<<<<<<<<<<<< * '''Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence. * The new sequence is constructed by concatenating nW preceding and succeeding */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_3seq2col(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_5thinc_6neural_3ops_3Ops_2seq2col[] = "Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence.\n The new sequence is constructed by concatenating nW preceding and succeeding\n vectors onto each column in the sequence, to extract a window of features.\n "; static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_3seq2col = {"seq2col", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_3seq2col, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5thinc_6neural_3ops_3Ops_2seq2col}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_3seq2col(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_seq = 0; int __pyx_v_nW; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("seq2col (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_seq,&__pyx_n_s_nW,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seq)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("seq2col", 1, 3, 3, 1); __PYX_ERR(0, 65, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nW)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("seq2col", 1, 3, 3, 2); __PYX_ERR(0, 65, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "seq2col") < 0)) __PYX_ERR(0, 65, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto 
__pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_seq = values[1]; __pyx_v_nW = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_nW == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 65, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("seq2col", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 65, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_2seq2col(__pyx_self, __pyx_v_self, __pyx_v_seq, __pyx_v_nW); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_2seq2col(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_seq, int __pyx_v_nW) { int __pyx_v_B; int __pyx_v_I; PyObject *__pyx_v_cols = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__2) __Pyx_RefNannySetupContext("seq2col", 0); __Pyx_TraceCall("seq2col", __pyx_f[0], 65, 0, __PYX_ERR(0, 65, __pyx_L1_error)); /* "thinc/neural/ops.pyx":71 * ''' * # This is a test implementation that only supports nW=1 * assert nW == 1 # <<<<<<<<<<<<<< * cdef int B = seq.shape[0] * cdef int I = seq.shape[1] */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_nW == 1) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 71, __pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":72 * # This is a test implementation that only supports nW=1 * assert nW == 1 * cdef int B = seq.shape[0] # <<<<<<<<<<<<<< * cdef int I = seq.shape[1] * cols = self.allocate((B, (nW*2+1), I)) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_seq, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 72, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 72, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 72, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_B = __pyx_t_3; /* "thinc/neural/ops.pyx":73 * assert nW == 1 * cdef int B = seq.shape[0] * cdef int I = seq.shape[1] # <<<<<<<<<<<<<< * cols = self.allocate((B, (nW*2+1), I)) * # Copy left contexts. The last words aren't the left-context for anything. 
*/ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_seq, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 73, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 73, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 73, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_I = __pyx_t_3; /* "thinc/neural/ops.pyx":74 * cdef int B = seq.shape[0] * cdef int I = seq.shape[1] * cols = self.allocate((B, (nW*2+1), I)) # <<<<<<<<<<<<<< * # Copy left contexts. The last words aren't the left-context for anything. * cols[nW:, :nW] = seq[:-nW].reshape((-1, nW, I)) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_long(((__pyx_v_nW * 2) + 1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_I); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_6); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_6, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_7); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_cols = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":76 * cols = self.allocate((B, (nW*2+1), I)) * # Copy left contexts. The last words aren't the left-context for anything. 
* cols[nW:, :nW] = seq[:-nW].reshape((-1, nW, I)) # <<<<<<<<<<<<<< * cols[:, nW] = seq * cols[:-nW, nW+1:] = seq[nW:].reshape((-1, nW, I)) */ __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_seq, 0, (-__pyx_v_nW), NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_reshape); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_nW); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_I); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_6); __pyx_t_2 = 0; __pyx_t_6 = 0; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_6, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_nW); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = PySlice_New(__pyx_t_7, Py_None, Py_None); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_nW); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_6 = PySlice_New(Py_None, __pyx_t_7, Py_None); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; if (unlikely(PyObject_SetItem(__pyx_v_cols, __pyx_t_7, __pyx_t_1) < 0)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":77 * # Copy left contexts. The last words aren't the left-context for anything. 
* cols[nW:, :nW] = seq[:-nW].reshape((-1, nW, I)) * cols[:, nW] = seq # <<<<<<<<<<<<<< * cols[:-nW, nW+1:] = seq[nW:].reshape((-1, nW, I)) * return cols.reshape((B, I * (2*nW+1))) */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_nW); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_slice__3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_1); __pyx_t_1 = 0; if (unlikely(PyObject_SetItem(__pyx_v_cols, __pyx_t_7, __pyx_v_seq) < 0)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":78 * cols[nW:, :nW] = seq[:-nW].reshape((-1, nW, I)) * cols[:, nW] = seq * cols[:-nW, nW+1:] = seq[nW:].reshape((-1, nW, I)) # <<<<<<<<<<<<<< * return cols.reshape((B, I * (2*nW+1))) * */ __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_seq, __pyx_v_nW, 0, NULL, NULL, NULL, 1, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_reshape); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_nW); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_I); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_7 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_5, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_int((-__pyx_v_nW)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = PySlice_New(Py_None, __pyx_t_6, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_nW + 1)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PySlice_New(__pyx_t_6, Py_None, Py_None); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5); __pyx_t_2 = 0; __pyx_t_5 = 0; if (unlikely(PyObject_SetItem(__pyx_v_cols, __pyx_t_6, __pyx_t_7) < 0)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":79 * cols[:, nW] = seq * cols[:-nW, nW+1:] = seq[nW:].reshape((-1, nW, I)) * return cols.reshape((B, I * (2*nW+1))) # <<<<<<<<<<<<<< * * def backprop_seq2col(self, dY, int nW): */ __Pyx_XDECREF(__pyx_r); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_cols, __pyx_n_s_reshape); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_PyInt_From_long((__pyx_v_I * ((2 * __pyx_v_nW) + 1))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2); __pyx_t_5 = 0; __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_7 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_2, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":65 * self.xp = xp * * def seq2col(self, seq, int nW): # <<<<<<<<<<<<<< * '''Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence. 
* The new sequence is constructed by concatenating nW preceding and succeeding */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.Ops.seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_cols); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":81 * return cols.reshape((B, I * (2*nW+1))) * * def backprop_seq2col(self, dY, int nW): # <<<<<<<<<<<<<< * # This is a test implementation that only supports nW=1 * assert nW == 1 */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_5backprop_seq2col(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_5backprop_seq2col = {"backprop_seq2col", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_5backprop_seq2col, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_5backprop_seq2col(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_dY = 0; int __pyx_v_nW; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_seq2col (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_dY,&__pyx_n_s_nW,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_seq2col", 1, 3, 3, 1); __PYX_ERR(0, 81, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nW)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_seq2col", 1, 3, 3, 2); __PYX_ERR(0, 81, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_seq2col") < 0)) __PYX_ERR(0, 81, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_dY = values[1]; __pyx_v_nW = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_nW == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 81, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_seq2col", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 81, __pyx_L3_error) __pyx_L3_error:; 
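/* The generated code above transcribes Ops.seq2col (thinc/neural/ops.pyx:65-79).
 * At the NumPy level it builds a (B, nW*2+1, I) buffer whose panels hold the
 * preceding row, the row itself and the following row, then flattens it to
 * (B, I*(nW*2+1)).  A minimal Python sketch of that logic, reconstructed from
 * the .pyx lines quoted above, assuming `seq` is a 2-D NumPy array and using
 * numpy.zeros in place of self.allocate (an assumption of this sketch):
 *
 *     import numpy
 *
 *     def seq2col(seq, nW=1):
 *         assert nW == 1              # the test implementation only supports nW=1
 *         B, I = seq.shape
 *         cols = numpy.zeros((B, nW * 2 + 1, I), dtype=seq.dtype)
 *         # Left contexts; the last nW rows aren't the left context of anything.
 *         cols[nW:, :nW] = seq[:-nW].reshape((-1, nW, I))
 *         # The centre panel is the row itself.
 *         cols[:, nW] = seq
 *         # Right contexts; the first nW rows aren't the right context of anything.
 *         cols[:-nW, nW + 1:] = seq[nW:].reshape((-1, nW, I))
 *         return cols.reshape((B, I * (2 * nW + 1)))
 */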
__Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_4backprop_seq2col(__pyx_self, __pyx_v_self, __pyx_v_dY, __pyx_v_nW); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_4backprop_seq2col(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_dY, int __pyx_v_nW) { int __pyx_v_nF; int __pyx_v_B; int __pyx_v_I; PyObject *__pyx_v_dX = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__4) __Pyx_RefNannySetupContext("backprop_seq2col", 0); __Pyx_TraceCall("backprop_seq2col", __pyx_f[0], 81, 0, __PYX_ERR(0, 81, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_dY); /* "thinc/neural/ops.pyx":83 * def backprop_seq2col(self, dY, int nW): * # This is a test implementation that only supports nW=1 * assert nW == 1 # <<<<<<<<<<<<<< * cdef int nF = nW*2+1 * cdef int B = dY.shape[0] */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_nW == 1) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 83, __pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":84 * # This is a test implementation that only supports nW=1 * assert nW == 1 * cdef int nF = nW*2+1 # <<<<<<<<<<<<<< * cdef int B = dY.shape[0] * cdef int I = dY.shape[1] / nF */ __pyx_v_nF = ((__pyx_v_nW * 2) + 1); /* "thinc/neural/ops.pyx":85 * assert nW == 1 * cdef int nF = nW*2+1 * cdef int B = dY.shape[0] # <<<<<<<<<<<<<< * cdef int I = dY.shape[1] / nF * # Having trouble getting the kernel to work... */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dY, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_B = __pyx_t_3; /* "thinc/neural/ops.pyx":86 * cdef int nF = nW*2+1 * cdef int B = dY.shape[0] * cdef int I = dY.shape[1] / nF # <<<<<<<<<<<<<< * # Having trouble getting the kernel to work... 
* dX = self.allocate((B, I)) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_dY, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_nF); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_4); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_I = __pyx_t_3; /* "thinc/neural/ops.pyx":88 * cdef int I = dY.shape[1] / nF * # Having trouble getting the kernel to work... * dX = self.allocate((B, I)) # <<<<<<<<<<<<<< * dY = dY.reshape((B, nF, I)) * dX[:-nW] += dY[nW:, :nW].reshape((-1, I)) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 88, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 88, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_I); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 88, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 88, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_4 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_6); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 88, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_dX = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":89 * # Having trouble getting the kernel to work... 
* dX = self.allocate((B, I)) * dY = dY.reshape((B, nF, I)) # <<<<<<<<<<<<<< * dX[:-nW] += dY[nW:, :nW].reshape((-1, I)) * dX += dY[:, nW] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_dY, __pyx_n_s_reshape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 89, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 89, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_nF); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 89, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_I); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 89, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 89, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_1); __pyx_t_6 = 0; __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_4 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_1, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_7); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 89, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_dY, __pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":90 * dX = self.allocate((B, I)) * dY = dY.reshape((B, nF, I)) * dX[:-nW] += dY[nW:, :nW].reshape((-1, I)) # <<<<<<<<<<<<<< * dX += dY[:, nW] * dX[nW:] += dY[:-nW, nW+1:].reshape((-1, I)) */ __pyx_t_4 = __Pyx_PyInt_From_int((-__pyx_v_nW)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PySlice_New(Py_None, __pyx_t_4, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_v_dX, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_nW); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PySlice_New(__pyx_t_1, Py_None, Py_None); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_nW); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = PySlice_New(Py_None, __pyx_t_1, Py_None); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_dY, __pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, 
__pyx_n_s_reshape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_I); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_7 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_6, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_InPlaceAdd(__pyx_t_4, __pyx_t_7); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(PyObject_SetItem(__pyx_v_dX, __pyx_t_2, __pyx_t_1) < 0)) __PYX_ERR(0, 90, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":91 * dY = dY.reshape((B, nF, I)) * dX[:-nW] += dY[nW:, :nW].reshape((-1, I)) * dX += dY[:, nW] # <<<<<<<<<<<<<< * dX[nW:] += dY[:-nW, nW+1:].reshape((-1, I)) * return dX */ __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_nW); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 91, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 91, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_slice__3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_dY, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 91, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_InPlaceAdd(__pyx_v_dX, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 91, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_dX, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":92 * dX[:-nW] += dY[nW:, :nW].reshape((-1, I)) * dX += dY[:, nW] * dX[nW:] += dY[:-nW, nW+1:].reshape((-1, I)) # <<<<<<<<<<<<<< * return dX * */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_nW); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PySlice_New(__pyx_t_1, Py_None, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_dX, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyInt_From_int((-__pyx_v_nW)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySlice_New(Py_None, __pyx_t_4, Py_None); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 92, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_From_long((__pyx_v_nW + 1)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PySlice_New(__pyx_t_4, Py_None, Py_None); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_dY, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_reshape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_I); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_7 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_6, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_t_1, __pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(PyObject_SetItem(__pyx_v_dX, __pyx_t_2, __pyx_t_4) < 0)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":93 * dX += dY[:, nW] * dX[nW:] += dY[:-nW, nW+1:].reshape((-1, I)) * return dX # <<<<<<<<<<<<<< * * def dropout_sequences(self, X, dropout, inplace=False): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_dX); __pyx_r = __pyx_v_dX; goto __pyx_L0; /* "thinc/neural/ops.pyx":81 * return cols.reshape((B, I * (2*nW+1))) * * def backprop_seq2col(self, dY, int nW): # <<<<<<<<<<<<<< * # This is a test implementation that only supports nW=1 * assert nW == 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dX); __Pyx_XDECREF(__pyx_v_dY); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":95 * return dX * * def dropout_sequences(self, X, dropout, inplace=False): # <<<<<<<<<<<<<< * if dropout is None or dropout <= 0.0: * return X, lambda func: func */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_sequences(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_7dropout_sequences = {"dropout_sequences", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_sequences, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_sequences(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_dropout = 0; PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("dropout_sequences (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_dropout,&__pyx_n_s_inplace,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_False)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else 
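/* The generated code above transcribes Ops.backprop_seq2col
 * (thinc/neural/ops.pyx:81-93), which routes the gradient of each window
 * position in dY back to the input row it was copied from in seq2col.
 * A minimal Python sketch, reconstructed from the .pyx lines quoted above,
 * again substituting numpy.zeros for self.allocate (an assumption of this
 * sketch):
 *
 *     import numpy
 *
 *     def backprop_seq2col(dY, nW=1):
 *         assert nW == 1              # the test implementation only supports nW=1
 *         nF = nW * 2 + 1
 *         B = dY.shape[0]
 *         I = dY.shape[1] // nF       # the .pyx writes dY.shape[1] / nF
 *         dX = numpy.zeros((B, I), dtype=dY.dtype)
 *         dY = dY.reshape((B, nF, I))
 *         dX[:-nW] += dY[nW:, :nW].reshape((-1, I))      # left-context copies
 *         dX += dY[:, nW]                                # centre column
 *         dX[nW:] += dY[:-nW, nW + 1:].reshape((-1, I))  # right-context copies
 *         return dX
 */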
goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dropout_sequences", 0, 3, 4, 1); __PYX_ERR(0, 95, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dropout)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dropout_sequences", 0, 3, 4, 2); __PYX_ERR(0, 95, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "dropout_sequences") < 0)) __PYX_ERR(0, 95, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = values[1]; __pyx_v_dropout = values[2]; __pyx_v_inplace = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("dropout_sequences", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 95, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout_sequences", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_6dropout_sequences(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_dropout, __pyx_v_inplace); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":99 * return X, lambda func: func * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] * def wrap_backprop(backprop): # <<<<<<<<<<<<<< * def finish_update(gradient, *args, **kwargs): * masked = [] */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_17dropout_sequences_1wrap_backprop(PyObject *__pyx_self, PyObject *__pyx_v_backprop); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_17dropout_sequences_1wrap_backprop = {"wrap_backprop", (PyCFunction)__pyx_pw_5thinc_6neural_3ops_3Ops_17dropout_sequences_1wrap_backprop, METH_O, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_17dropout_sequences_1wrap_backprop(PyObject *__pyx_self, PyObject *__pyx_v_backprop) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("wrap_backprop (wrapper)", 0); __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_17dropout_sequences_wrap_backprop(__pyx_self, ((PyObject *)__pyx_v_backprop)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":100 * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): # <<<<<<<<<<<<<< * masked = [] * for i, mask in enumerate(masks): */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_17dropout_sequences_13wrap_backprop_1finish_update(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_17dropout_sequences_13wrap_backprop_1finish_update = {"finish_update", 
(PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_17dropout_sequences_13wrap_backprop_1finish_update, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_17dropout_sequences_13wrap_backprop_1finish_update(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_gradient = 0; PyObject *__pyx_v_args = 0; PyObject *__pyx_v_kwargs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("finish_update (wrapper)", 0); __pyx_v_kwargs = PyDict_New(); if (unlikely(!__pyx_v_kwargs)) return NULL; __Pyx_GOTREF(__pyx_v_kwargs); if (PyTuple_GET_SIZE(__pyx_args) > 1) { __pyx_v_args = PyTuple_GetSlice(__pyx_args, 1, PyTuple_GET_SIZE(__pyx_args)); if (unlikely(!__pyx_v_args)) { __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_RefNannyFinishContext(); return NULL; } __Pyx_GOTREF(__pyx_v_args); } else { __pyx_v_args = __pyx_empty_tuple; __Pyx_INCREF(__pyx_empty_tuple); } { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_gradient,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { default: case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { const Py_ssize_t used_pos_args = (pos_args < 1) ? pos_args : 1; if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, used_pos_args, "finish_update") < 0)) __PYX_ERR(0, 100, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) < 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_gradient = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("finish_update", 0, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 100, __pyx_L3_error) __pyx_L3_error:; __Pyx_DECREF(__pyx_v_args); __pyx_v_args = 0; __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout_sequences.wrap_backprop.finish_update", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_17dropout_sequences_13wrap_backprop_finish_update(__pyx_self, __pyx_v_gradient, __pyx_v_args, __pyx_v_kwargs); /* function exit code */ __Pyx_XDECREF(__pyx_v_args); __Pyx_XDECREF(__pyx_v_kwargs); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_17dropout_sequences_13wrap_backprop_finish_update(PyObject *__pyx_self, PyObject *__pyx_v_gradient, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *__pyx_cur_scope; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *__pyx_outer_scope; PyObject *__pyx_v_masked = NULL; CYTHON_UNUSED PyObject *__pyx_v_i = NULL; PyObject *__pyx_v_mask = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char 
*__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("finish_update", 0); __pyx_outer_scope = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_TraceCall("finish_update", __pyx_f[0], 100, 0, __PYX_ERR(0, 100, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_gradient); /* "thinc/neural/ops.pyx":101 * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): * masked = [] # <<<<<<<<<<<<<< * for i, mask in enumerate(masks): * if inplace: */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_masked = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":102 * def finish_update(gradient, *args, **kwargs): * masked = [] * for i, mask in enumerate(masks): # <<<<<<<<<<<<<< * if inplace: * gradient *= mask */ __Pyx_INCREF(__pyx_int_0); __pyx_t_1 = __pyx_int_0; if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_masks)) { __Pyx_RaiseClosureNameError("masks"); __PYX_ERR(0, 102, __pyx_L1_error) } __pyx_t_2 = __pyx_cur_scope->__pyx_outer_scope->__pyx_v_masks; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; for (;;) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 102, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_XDECREF_SET(__pyx_v_mask, __pyx_t_4); __pyx_t_4 = 0; __Pyx_INCREF(__pyx_t_1); __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_1); __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_t_1, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":103 * masked = [] * for i, mask in enumerate(masks): * if inplace: # <<<<<<<<<<<<<< * gradient *= mask * masked.append(gradient) */ if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_inplace)) { __Pyx_RaiseClosureNameError("inplace"); __PYX_ERR(0, 103, __pyx_L1_error) } __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_inplace); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 103, __pyx_L1_error) if (__pyx_t_5) { /* "thinc/neural/ops.pyx":104 * for i, mask in enumerate(masks): * if inplace: * gradient *= mask # <<<<<<<<<<<<<< * masked.append(gradient) * else: */ __pyx_t_4 = PyNumber_InPlaceMultiply(__pyx_v_gradient, __pyx_v_mask); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 104, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_gradient, __pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":105 * if inplace: * gradient *= mask * masked.append(gradient) # <<<<<<<<<<<<<< * else: * masked.append(gradient * mask) */ __pyx_t_6 = __Pyx_PyList_Append(__pyx_v_masked, __pyx_v_gradient); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(0, 105, __pyx_L1_error) /* "thinc/neural/ops.pyx":103 * masked = [] * for i, mask in enumerate(masks): * if inplace: # <<<<<<<<<<<<<< * gradient *= mask * masked.append(gradient) */ goto __pyx_L5; } /* "thinc/neural/ops.pyx":107 * masked.append(gradient) * else: * masked.append(gradient * mask) # <<<<<<<<<<<<<< * return backprop(masked, *args, **kwargs) * return finish_update */ /*else*/ { __pyx_t_4 = PyNumber_Multiply(__pyx_v_gradient, 
__pyx_v_mask); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 107, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyList_Append(__pyx_v_masked, __pyx_t_4); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(0, 107, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __pyx_L5:; /* "thinc/neural/ops.pyx":102 * def finish_update(gradient, *args, **kwargs): * masked = [] * for i, mask in enumerate(masks): # <<<<<<<<<<<<<< * if inplace: * gradient *= mask */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":108 * else: * masked.append(gradient * mask) * return backprop(masked, *args, **kwargs) # <<<<<<<<<<<<<< * return finish_update * if inplace: */ __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_cur_scope->__pyx_v_backprop)) { __Pyx_RaiseClosureNameError("backprop"); __PYX_ERR(0, 108, __pyx_L1_error) } __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 108, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_masked); __Pyx_GIVEREF(__pyx_v_masked); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_masked); __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_v_args); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 108, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_cur_scope->__pyx_v_backprop, __pyx_t_2, __pyx_v_kwargs); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 108, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":100 * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): # <<<<<<<<<<<<<< * masked = [] * for i, mask in enumerate(masks): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout_sequences.wrap_backprop.finish_update", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_masked); __Pyx_XDECREF(__pyx_v_i); __Pyx_XDECREF(__pyx_v_mask); __Pyx_XDECREF(__pyx_v_gradient); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":99 * return X, lambda func: func * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] * def wrap_backprop(backprop): # <<<<<<<<<<<<<< * def finish_update(gradient, *args, **kwargs): * masked = [] */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_17dropout_sequences_wrap_backprop(PyObject *__pyx_self, PyObject *__pyx_v_backprop) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *__pyx_cur_scope; PyObject *__pyx_v_finish_update = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("wrap_backprop", 0); __pyx_cur_scope = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *)__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop(__pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 99, __pyx_L1_error) } else { __Pyx_GOTREF(__pyx_cur_scope); } __pyx_cur_scope->__pyx_outer_scope = (struct 
__pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *) __Pyx_CyFunction_GetClosure(__pyx_self); __Pyx_INCREF(((PyObject *)__pyx_cur_scope->__pyx_outer_scope)); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_outer_scope); __Pyx_TraceCall("wrap_backprop", __pyx_f[0], 99, 0, __PYX_ERR(0, 99, __pyx_L1_error)); __pyx_cur_scope->__pyx_v_backprop = __pyx_v_backprop; __Pyx_INCREF(__pyx_cur_scope->__pyx_v_backprop); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_backprop); /* "thinc/neural/ops.pyx":100 * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): # <<<<<<<<<<<<<< * masked = [] * for i, mask in enumerate(masks): */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_17dropout_sequences_13wrap_backprop_1finish_update, 0, __pyx_n_s_Ops_dropout_sequences_locals_wra, ((PyObject*)__pyx_cur_scope), __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__6)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 100, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_finish_update = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":109 * masked.append(gradient * mask) * return backprop(masked, *args, **kwargs) * return finish_update # <<<<<<<<<<<<<< * if inplace: * for i, mask in enumerate(masks): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_finish_update); __pyx_r = __pyx_v_finish_update; goto __pyx_L0; /* "thinc/neural/ops.pyx":99 * return X, lambda func: func * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] * def wrap_backprop(backprop): # <<<<<<<<<<<<<< * def finish_update(gradient, *args, **kwargs): * masked = [] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout_sequences.wrap_backprop", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_finish_update); __Pyx_DECREF(((PyObject *)__pyx_cur_scope)); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":97 * def dropout_sequences(self, X, dropout, inplace=False): * if dropout is None or dropout <= 0.0: * return X, lambda func: func # <<<<<<<<<<<<<< * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] * def wrap_backprop(backprop): */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_17dropout_sequences_2lambda(PyObject *__pyx_self, PyObject *__pyx_v_func); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_17dropout_sequences_2lambda = {"lambda", (PyCFunction)__pyx_pw_5thinc_6neural_3ops_3Ops_17dropout_sequences_2lambda, METH_O, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_17dropout_sequences_2lambda(PyObject *__pyx_self, PyObject *__pyx_v_func) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda(__pyx_self, ((PyObject *)__pyx_v_func)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("lambda", 0); __Pyx_TraceCall("lambda", __pyx_f[0], 97, 0, __PYX_ERR(0, 97, __pyx_L1_error)); __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_func); __pyx_r = __pyx_v_func; goto __pyx_L0; /* function exit code */ 
__pyx_L1_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout_sequences.lambda", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":95 * return dX * * def dropout_sequences(self, X, dropout, inplace=False): # <<<<<<<<<<<<<< * if dropout is None or dropout <= 0.0: * return X, lambda func: func */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_6dropout_sequences(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_dropout, PyObject *__pyx_v_inplace) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *__pyx_cur_scope; PyObject *__pyx_v_wrap_backprop = 0; PyObject *__pyx_v_i = NULL; PyObject *__pyx_v_mask = NULL; PyObject *__pyx_v_masked = NULL; PyObject *__pyx_v_x = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; PyObject *(*__pyx_t_7)(PyObject *); PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; int __pyx_t_12; PyObject *__pyx_t_13 = NULL; int __pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__7) __Pyx_RefNannySetupContext("dropout_sequences", 0); __pyx_cur_scope = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *)__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences(__pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 95, __pyx_L1_error) } else { __Pyx_GOTREF(__pyx_cur_scope); } __Pyx_TraceCall("dropout_sequences", __pyx_f[0], 95, 0, __PYX_ERR(0, 95, __pyx_L1_error)); __pyx_cur_scope->__pyx_v_inplace = __pyx_v_inplace; __Pyx_INCREF(__pyx_cur_scope->__pyx_v_inplace); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_inplace); /* "thinc/neural/ops.pyx":96 * * def dropout_sequences(self, X, dropout, inplace=False): * if dropout is None or dropout <= 0.0: # <<<<<<<<<<<<<< * return X, lambda func: func * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] */ __pyx_t_2 = (__pyx_v_dropout == Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = PyObject_RichCompare(__pyx_v_dropout, __pyx_float_0_0, Py_LE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 96, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 96, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __pyx_t_3; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "thinc/neural/ops.pyx":97 * def dropout_sequences(self, X, dropout, inplace=False): * if dropout is None or dropout <= 0.0: * return X, lambda func: func # <<<<<<<<<<<<<< * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] * def wrap_backprop(backprop): */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_17dropout_sequences_2lambda, 0, __pyx_n_s_Ops_dropout_sequences_locals_lam, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 97, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_X); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __pyx_t_4 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":96 * * def dropout_sequences(self, X, dropout, inplace=False): * if dropout is None or dropout <= 0.0: # <<<<<<<<<<<<<< * return X, lambda func: func * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] */ } /* "thinc/neural/ops.pyx":98 * if dropout is None or dropout <= 0.0: * return X, lambda func: func * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] # <<<<<<<<<<<<<< * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): */ __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (likely(PyList_CheckExact(__pyx_v_X)) || PyTuple_CheckExact(__pyx_v_X)) { __pyx_t_4 = __pyx_v_X; __Pyx_INCREF(__pyx_t_4); __pyx_t_6 = 0; __pyx_t_7 = NULL; } else { __pyx_t_6 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_X); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 98, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_7)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_8 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++; if (unlikely(0 < 0)) __PYX_ERR(0, 98, __pyx_L1_error) #else __pyx_t_8 = PySequence_ITEM(__pyx_t_4, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); #endif } else { if (__pyx_t_6 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_8 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++; if (unlikely(0 < 0)) __PYX_ERR(0, 98, __pyx_L1_error) #else __pyx_t_8 = PySequence_ITEM(__pyx_t_4, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); #endif } } else { __pyx_t_8 = __pyx_t_7(__pyx_t_4); if (unlikely(!__pyx_t_8)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 98, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_8); } __Pyx_XDECREF_SET(__pyx_v_x, __pyx_t_8); __pyx_t_8 = 0; __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_get_dropout_mask); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_shape); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_11 = NULL; __pyx_t_12 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_9))) { __pyx_t_11 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_11)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_11); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); __pyx_t_12 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_11, __pyx_t_10, __pyx_v_dropout}; __pyx_t_8 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_12, 
2+__pyx_t_12); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[3] = {__pyx_t_11, __pyx_t_10, __pyx_v_dropout}; __pyx_t_8 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } else #endif { __pyx_t_13 = PyTuple_New(2+__pyx_t_12); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); if (__pyx_t_11) { __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_13, 0, __pyx_t_11); __pyx_t_11 = NULL; } __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_13, 0+__pyx_t_12, __pyx_t_10); __Pyx_INCREF(__pyx_v_dropout); __Pyx_GIVEREF(__pyx_v_dropout); PyTuple_SET_ITEM(__pyx_t_13, 1+__pyx_t_12, __pyx_v_dropout); __pyx_t_10 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_13, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(__Pyx_ListComp_Append(__pyx_t_5, (PyObject*)__pyx_t_8))) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GIVEREF(__pyx_t_5); __pyx_cur_scope->__pyx_v_masks = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "thinc/neural/ops.pyx":99 * return X, lambda func: func * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] * def wrap_backprop(backprop): # <<<<<<<<<<<<<< * def finish_update(gradient, *args, **kwargs): * masked = [] */ __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_17dropout_sequences_1wrap_backprop, 0, __pyx_n_s_Ops_dropout_sequences_locals_wra_2, ((PyObject*)__pyx_cur_scope), __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__9)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 99, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_wrap_backprop = __pyx_t_5; __pyx_t_5 = 0; /* "thinc/neural/ops.pyx":110 * return backprop(masked, *args, **kwargs) * return finish_update * if inplace: # <<<<<<<<<<<<<< * for i, mask in enumerate(masks): * X[i] *= mask */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_cur_scope->__pyx_v_inplace); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 110, __pyx_L1_error) if (__pyx_t_1) { /* "thinc/neural/ops.pyx":111 * return finish_update * if inplace: * for i, mask in enumerate(masks): # <<<<<<<<<<<<<< * X[i] *= mask * return X, wrap_backprop */ __Pyx_INCREF(__pyx_int_0); __pyx_t_5 = __pyx_int_0; __pyx_t_4 = __pyx_cur_scope->__pyx_v_masks; __Pyx_INCREF(__pyx_t_4); __pyx_t_6 = 0; for (;;) { if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_8 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++; if (unlikely(0 < 0)) __PYX_ERR(0, 111, __pyx_L1_error) #else __pyx_t_8 = PySequence_ITEM(__pyx_t_4, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); #endif __Pyx_XDECREF_SET(__pyx_v_mask, __pyx_t_8); __pyx_t_8 = 0; __Pyx_INCREF(__pyx_t_5); __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_5); __pyx_t_8 = __Pyx_PyInt_AddObjC(__pyx_t_5, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); 
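/* Reference sketch (not emitted by Cython): the surrounding block lowers the
 * `for i, mask in enumerate(masks)` loop from ops.pyx:111. Rather than calling
 * the enumerate() builtin, the generated C keeps the running index in a
 * temporary and bumps it with __Pyx_PyInt_AddObjC once per iteration. A
 * hedged, self-contained Python rendering of that counter technique; the
 * names and values below are illustrative assumptions, not from this file:
 *
 *     masks = [0.5, 0.25]
 *     X = [2.0, 4.0]
 *     i = 0
 *     for mask in masks:
 *         X[i] = X[i] * mask   # `X[i] *= mask` at ops.pyx:112
 *         i += 1               # what __Pyx_PyInt_AddObjC(i, 1, ...) computes here
 *     assert X == [1.0, 1.0]
 */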
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = __pyx_t_8; __pyx_t_8 = 0; /* "thinc/neural/ops.pyx":112 * if inplace: * for i, mask in enumerate(masks): * X[i] *= mask # <<<<<<<<<<<<<< * return X, wrap_backprop * else: */ __Pyx_INCREF(__pyx_v_i); __pyx_t_8 = __pyx_v_i; __pyx_t_9 = __Pyx_PyObject_GetItem(__pyx_v_X, __pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_13 = PyNumber_InPlaceMultiply(__pyx_t_9, __pyx_v_mask); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(PyObject_SetItem(__pyx_v_X, __pyx_t_8, __pyx_t_13) < 0)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /* "thinc/neural/ops.pyx":111 * return finish_update * if inplace: * for i, mask in enumerate(masks): # <<<<<<<<<<<<<< * X[i] *= mask * return X, wrap_backprop */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "thinc/neural/ops.pyx":113 * for i, mask in enumerate(masks): * X[i] *= mask * return X, wrap_backprop # <<<<<<<<<<<<<< * else: * masked = [] */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_X); __Pyx_INCREF(__pyx_v_wrap_backprop); __Pyx_GIVEREF(__pyx_v_wrap_backprop); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_v_wrap_backprop); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":110 * return backprop(masked, *args, **kwargs) * return finish_update * if inplace: # <<<<<<<<<<<<<< * for i, mask in enumerate(masks): * X[i] *= mask */ } /* "thinc/neural/ops.pyx":115 * return X, wrap_backprop * else: * masked = [] # <<<<<<<<<<<<<< * for i, mask in enumerate(masks): * masked.append(X[i] * mask) */ /*else*/ { __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_masked = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "thinc/neural/ops.pyx":116 * else: * masked = [] * for i, mask in enumerate(masks): # <<<<<<<<<<<<<< * masked.append(X[i] * mask) * return masked, wrap_backprop */ __Pyx_INCREF(__pyx_int_0); __pyx_t_5 = __pyx_int_0; __pyx_t_4 = __pyx_cur_scope->__pyx_v_masks; __Pyx_INCREF(__pyx_t_4); __pyx_t_6 = 0; for (;;) { if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_8 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++; if (unlikely(0 < 0)) __PYX_ERR(0, 116, __pyx_L1_error) #else __pyx_t_8 = PySequence_ITEM(__pyx_t_4, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 116, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); #endif __Pyx_XDECREF_SET(__pyx_v_mask, __pyx_t_8); __pyx_t_8 = 0; __Pyx_INCREF(__pyx_t_5); __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_5); __pyx_t_8 = __Pyx_PyInt_AddObjC(__pyx_t_5, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 116, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = __pyx_t_8; __pyx_t_8 = 0; /* "thinc/neural/ops.pyx":117 * masked = [] * for i, mask in enumerate(masks): * masked.append(X[i] * mask) # <<<<<<<<<<<<<< * return masked, wrap_backprop * */ __pyx_t_8 = __Pyx_PyObject_GetItem(__pyx_v_X, __pyx_v_i); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_13 = PyNumber_Multiply(__pyx_t_8, __pyx_v_mask); if 
(unlikely(!__pyx_t_13)) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_masked, __pyx_t_13); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; /* "thinc/neural/ops.pyx":116 * else: * masked = [] * for i, mask in enumerate(masks): # <<<<<<<<<<<<<< * masked.append(X[i] * mask) * return masked, wrap_backprop */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "thinc/neural/ops.pyx":118 * for i, mask in enumerate(masks): * masked.append(X[i] * mask) * return masked, wrap_backprop # <<<<<<<<<<<<<< * * def dropout(self, x, dropout, inplace=False): */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 118, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_masked); __Pyx_GIVEREF(__pyx_v_masked); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_masked); __Pyx_INCREF(__pyx_v_wrap_backprop); __Pyx_GIVEREF(__pyx_v_wrap_backprop); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_v_wrap_backprop); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; } /* "thinc/neural/ops.pyx":95 * return dX * * def dropout_sequences(self, X, dropout, inplace=False): # <<<<<<<<<<<<<< * if dropout is None or dropout <= 0.0: * return X, lambda func: func */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_XDECREF(__pyx_t_13); __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout_sequences", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_wrap_backprop); __Pyx_XDECREF(__pyx_v_i); __Pyx_XDECREF(__pyx_v_mask); __Pyx_XDECREF(__pyx_v_masked); __Pyx_XDECREF(__pyx_v_x); __Pyx_DECREF(((PyObject *)__pyx_cur_scope)); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":120 * return masked, wrap_backprop * * def dropout(self, x, dropout, inplace=False): # <<<<<<<<<<<<<< * if dropout is None or dropout <= 0.0: * return x, lambda func: func */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_9dropout(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_9dropout = {"dropout", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_9dropout, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_9dropout(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_x = 0; PyObject *__pyx_v_dropout = 0; PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("dropout (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,&__pyx_n_s_dropout,&__pyx_n_s_inplace,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_False)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 
1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dropout", 0, 3, 4, 1); __PYX_ERR(0, 120, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dropout)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dropout", 0, 3, 4, 2); __PYX_ERR(0, 120, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "dropout") < 0)) __PYX_ERR(0, 120, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_x = values[1]; __pyx_v_dropout = values[2]; __pyx_v_inplace = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("dropout", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 120, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_8dropout(__pyx_self, __pyx_v_self, __pyx_v_x, __pyx_v_dropout, __pyx_v_inplace); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":126 * if mask is None: * return x, lambda func: func * def wrap_backprop(backprop): # <<<<<<<<<<<<<< * def finish_update(gradient, *args, **kwargs): * return backprop(gradient * mask, *args, **kwargs) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_1wrap_backprop(PyObject *__pyx_self, PyObject *__pyx_v_backprop); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_7dropout_1wrap_backprop = {"wrap_backprop", (PyCFunction)__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_1wrap_backprop, METH_O, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_1wrap_backprop(PyObject *__pyx_self, PyObject *__pyx_v_backprop) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("wrap_backprop (wrapper)", 0); __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_7dropout_wrap_backprop(__pyx_self, ((PyObject *)__pyx_v_backprop)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":127 * return x, lambda func: func * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): # <<<<<<<<<<<<<< * return backprop(gradient * mask, *args, **kwargs) * return finish_update */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_13wrap_backprop_1finish_update(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef 
__pyx_mdef_5thinc_6neural_3ops_3Ops_7dropout_13wrap_backprop_1finish_update = {"finish_update", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_13wrap_backprop_1finish_update, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_13wrap_backprop_1finish_update(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_gradient = 0; PyObject *__pyx_v_args = 0; PyObject *__pyx_v_kwargs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("finish_update (wrapper)", 0); __pyx_v_kwargs = PyDict_New(); if (unlikely(!__pyx_v_kwargs)) return NULL; __Pyx_GOTREF(__pyx_v_kwargs); if (PyTuple_GET_SIZE(__pyx_args) > 1) { __pyx_v_args = PyTuple_GetSlice(__pyx_args, 1, PyTuple_GET_SIZE(__pyx_args)); if (unlikely(!__pyx_v_args)) { __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_RefNannyFinishContext(); return NULL; } __Pyx_GOTREF(__pyx_v_args); } else { __pyx_v_args = __pyx_empty_tuple; __Pyx_INCREF(__pyx_empty_tuple); } { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_gradient,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { default: case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { const Py_ssize_t used_pos_args = (pos_args < 1) ? pos_args : 1; if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, used_pos_args, "finish_update") < 0)) __PYX_ERR(0, 127, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) < 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_gradient = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("finish_update", 0, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 127, __pyx_L3_error) __pyx_L3_error:; __Pyx_DECREF(__pyx_v_args); __pyx_v_args = 0; __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout.wrap_backprop.finish_update", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_7dropout_13wrap_backprop_finish_update(__pyx_self, __pyx_v_gradient, __pyx_v_args, __pyx_v_kwargs); /* function exit code */ __Pyx_XDECREF(__pyx_v_args); __Pyx_XDECREF(__pyx_v_kwargs); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_7dropout_13wrap_backprop_finish_update(PyObject *__pyx_self, PyObject *__pyx_v_gradient, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *__pyx_cur_scope; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("finish_update", 0); __pyx_outer_scope = (struct 
__pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_TraceCall("finish_update", __pyx_f[0], 127, 0, __PYX_ERR(0, 127, __pyx_L1_error)); /* "thinc/neural/ops.pyx":128 * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): * return backprop(gradient * mask, *args, **kwargs) # <<<<<<<<<<<<<< * return finish_update * if inplace: */ __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_cur_scope->__pyx_v_backprop)) { __Pyx_RaiseClosureNameError("backprop"); __PYX_ERR(0, 128, __pyx_L1_error) } if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_mask)) { __Pyx_RaiseClosureNameError("mask"); __PYX_ERR(0, 128, __pyx_L1_error) } __pyx_t_1 = PyNumber_Multiply(__pyx_v_gradient, __pyx_cur_scope->__pyx_outer_scope->__pyx_v_mask); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 128, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 128, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Add(__pyx_t_2, __pyx_v_args); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 128, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_cur_scope->__pyx_v_backprop, __pyx_t_1, __pyx_v_kwargs); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 128, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":127 * return x, lambda func: func * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): # <<<<<<<<<<<<<< * return backprop(gradient * mask, *args, **kwargs) * return finish_update */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout.wrap_backprop.finish_update", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":126 * if mask is None: * return x, lambda func: func * def wrap_backprop(backprop): # <<<<<<<<<<<<<< * def finish_update(gradient, *args, **kwargs): * return backprop(gradient * mask, *args, **kwargs) */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_7dropout_wrap_backprop(PyObject *__pyx_self, PyObject *__pyx_v_backprop) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *__pyx_cur_scope; PyObject *__pyx_v_finish_update = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("wrap_backprop", 0); __pyx_cur_scope = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *)__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop(__pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 126, __pyx_L1_error) } else { __Pyx_GOTREF(__pyx_cur_scope); } __pyx_cur_scope->__pyx_outer_scope = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *) __Pyx_CyFunction_GetClosure(__pyx_self); __Pyx_INCREF(((PyObject 
*)__pyx_cur_scope->__pyx_outer_scope)); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_outer_scope); __Pyx_TraceCall("wrap_backprop", __pyx_f[0], 126, 0, __PYX_ERR(0, 126, __pyx_L1_error)); __pyx_cur_scope->__pyx_v_backprop = __pyx_v_backprop; __Pyx_INCREF(__pyx_cur_scope->__pyx_v_backprop); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_backprop); /* "thinc/neural/ops.pyx":127 * return x, lambda func: func * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): # <<<<<<<<<<<<<< * return backprop(gradient * mask, *args, **kwargs) * return finish_update */ __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_7dropout_13wrap_backprop_1finish_update, 0, __pyx_n_s_Ops_dropout_locals_wrap_backprop, ((PyObject*)__pyx_cur_scope), __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__11)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_finish_update = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":129 * def finish_update(gradient, *args, **kwargs): * return backprop(gradient * mask, *args, **kwargs) * return finish_update # <<<<<<<<<<<<<< * if inplace: * x *= mask */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_finish_update); __pyx_r = __pyx_v_finish_update; goto __pyx_L0; /* "thinc/neural/ops.pyx":126 * if mask is None: * return x, lambda func: func * def wrap_backprop(backprop): # <<<<<<<<<<<<<< * def finish_update(gradient, *args, **kwargs): * return backprop(gradient * mask, *args, **kwargs) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout.wrap_backprop", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_finish_update); __Pyx_DECREF(((PyObject *)__pyx_cur_scope)); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":122 * def dropout(self, x, dropout, inplace=False): * if dropout is None or dropout <= 0.0: * return x, lambda func: func # <<<<<<<<<<<<<< * mask = self.get_dropout_mask(x.shape, dropout) * if mask is None: */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_2lambda1(PyObject *__pyx_self, PyObject *__pyx_v_func); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_7dropout_2lambda1 = {"lambda1", (PyCFunction)__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_2lambda1, METH_O, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_2lambda1(PyObject *__pyx_self, PyObject *__pyx_v_func) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda1 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda1(__pyx_self, ((PyObject *)__pyx_v_func)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("lambda1", 0); __Pyx_TraceCall("lambda1", __pyx_f[0], 122, 0, __PYX_ERR(0, 122, __pyx_L1_error)); __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_func); __pyx_r = __pyx_v_func; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout.lambda1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); 
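/* Reference sketch (not emitted by Cython): lambda1 above is the identity
 * backprop wrapper from `return x, lambda func: func` (ops.pyx:122); dropout()
 * returns it whenever dropout is disabled, so callers can wrap their backprop
 * callback unconditionally. A hedged, self-contained illustration; the name
 * dropout_like and the literal inputs are assumptions for this sketch only:
 *
 *     def dropout_like(x, rate):
 *         if rate is None or rate <= 0.0:
 *             return x, lambda func: func   # same shape as lambda1/lambda2
 *         raise NotImplementedError("the real path builds a mask; see the generated code")
 *
 *     y, wrap = dropout_like([1.0, 2.0], 0.0)
 *     backprop = wrap(lambda grad: grad)    # identity: `backprop` is the callable passed in
 *     assert y == [1.0, 2.0]
 */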
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":125 * mask = self.get_dropout_mask(x.shape, dropout) * if mask is None: * return x, lambda func: func # <<<<<<<<<<<<<< * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_3lambda2(PyObject *__pyx_self, PyObject *__pyx_v_func); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_7dropout_3lambda2 = {"lambda2", (PyCFunction)__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_3lambda2, METH_O, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_7dropout_3lambda2(PyObject *__pyx_self, PyObject *__pyx_v_func) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda2 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda2(__pyx_self, ((PyObject *)__pyx_v_func)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda2(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("lambda2", 0); __Pyx_TraceCall("lambda2", __pyx_f[0], 125, 0, __PYX_ERR(0, 125, __pyx_L1_error)); __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_func); __pyx_r = __pyx_v_func; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout.lambda2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":120 * return masked, wrap_backprop * * def dropout(self, x, dropout, inplace=False): # <<<<<<<<<<<<<< * if dropout is None or dropout <= 0.0: * return x, lambda func: func */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_8dropout(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_dropout, PyObject *__pyx_v_inplace) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *__pyx_cur_scope; PyObject *__pyx_v_wrap_backprop = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__12) __Pyx_RefNannySetupContext("dropout", 0); __pyx_cur_scope = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *)__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_2_dropout(__pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct_2_dropout, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 120, __pyx_L1_error) } else { __Pyx_GOTREF(__pyx_cur_scope); } __Pyx_TraceCall("dropout", __pyx_f[0], 120, 0, __PYX_ERR(0, 120, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_x); /* "thinc/neural/ops.pyx":121 * * def dropout(self, x, dropout, inplace=False): * if dropout is None or dropout <= 0.0: # <<<<<<<<<<<<<< * return x, lambda func: func * mask = self.get_dropout_mask(x.shape, dropout) */ __pyx_t_2 = (__pyx_v_dropout == Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if 
(!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = PyObject_RichCompare(__pyx_v_dropout, __pyx_float_0_0, Py_LE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 121, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 121, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __pyx_t_3; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "thinc/neural/ops.pyx":122 * def dropout(self, x, dropout, inplace=False): * if dropout is None or dropout <= 0.0: * return x, lambda func: func # <<<<<<<<<<<<<< * mask = self.get_dropout_mask(x.shape, dropout) * if mask is None: */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_7dropout_2lambda1, 0, __pyx_n_s_Ops_dropout_locals_lambda, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 122, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 122, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_x); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __pyx_t_4 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":121 * * def dropout(self, x, dropout, inplace=False): * if dropout is None or dropout <= 0.0: # <<<<<<<<<<<<<< * return x, lambda func: func * mask = self.get_dropout_mask(x.shape, dropout) */ } /* "thinc/neural/ops.pyx":123 * if dropout is None or dropout <= 0.0: * return x, lambda func: func * mask = self.get_dropout_mask(x.shape, dropout) # <<<<<<<<<<<<<< * if mask is None: * return x, lambda func: func */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_get_dropout_mask); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 123, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_shape); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 123, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_dropout}; __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 123, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_dropout}; __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 123, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 123, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_dropout); 
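/* Reference sketch (not emitted by Cython): this stretch assembles the argument
 * tuple for `self.get_dropout_mask(x.shape, dropout)` (ops.pyx:123); the
 * resulting mask is then captured by the finish_update closure created at
 * ops.pyx:126-129. A hedged, self-contained Python illustration of that
 * closure; importing numpy is an assumption standing in for whatever
 * get_array_module() would return (it may equally be cupy):
 *
 *     import numpy as np
 *
 *     mask = np.array([1.0, 0.0, 1.0])          # stand-in for get_dropout_mask(...)
 *
 *     def wrap_backprop(backprop):              # quoted at ops.pyx:126
 *         def finish_update(gradient, *args, **kwargs):
 *             return backprop(gradient * mask, *args, **kwargs)
 *         return finish_update
 *
 *     finish = wrap_backprop(lambda g: g.sum())
 *     assert finish(np.ones(3)) == 2.0          # the zeroed position contributes nothing
 */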
__Pyx_GIVEREF(__pyx_v_dropout); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_dropout); __pyx_t_6 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 123, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GIVEREF(__pyx_t_5); __pyx_cur_scope->__pyx_v_mask = __pyx_t_5; __pyx_t_5 = 0; /* "thinc/neural/ops.pyx":124 * return x, lambda func: func * mask = self.get_dropout_mask(x.shape, dropout) * if mask is None: # <<<<<<<<<<<<<< * return x, lambda func: func * def wrap_backprop(backprop): */ __pyx_t_1 = (__pyx_cur_scope->__pyx_v_mask == Py_None); __pyx_t_3 = (__pyx_t_1 != 0); if (__pyx_t_3) { /* "thinc/neural/ops.pyx":125 * mask = self.get_dropout_mask(x.shape, dropout) * if mask is None: * return x, lambda func: func # <<<<<<<<<<<<<< * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_7dropout_3lambda2, 0, __pyx_n_s_Ops_dropout_locals_lambda, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_x); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":124 * return x, lambda func: func * mask = self.get_dropout_mask(x.shape, dropout) * if mask is None: # <<<<<<<<<<<<<< * return x, lambda func: func * def wrap_backprop(backprop): */ } /* "thinc/neural/ops.pyx":126 * if mask is None: * return x, lambda func: func * def wrap_backprop(backprop): # <<<<<<<<<<<<<< * def finish_update(gradient, *args, **kwargs): * return backprop(gradient * mask, *args, **kwargs) */ __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_7dropout_1wrap_backprop, 0, __pyx_n_s_Ops_dropout_locals_wrap_backprop_2, ((PyObject*)__pyx_cur_scope), __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__14)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_wrap_backprop = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":130 * return backprop(gradient * mask, *args, **kwargs) * return finish_update * if inplace: # <<<<<<<<<<<<<< * x *= mask * return x, wrap_backprop */ __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 130, __pyx_L1_error) if (__pyx_t_3) { /* "thinc/neural/ops.pyx":131 * return finish_update * if inplace: * x *= mask # <<<<<<<<<<<<<< * return x, wrap_backprop * else: */ __pyx_t_4 = PyNumber_InPlaceMultiply(__pyx_v_x, __pyx_cur_scope->__pyx_v_mask); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_x, __pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":132 * if inplace: * x *= mask * return x, wrap_backprop # <<<<<<<<<<<<<< * else: * return x * mask, wrap_backprop */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_x); __Pyx_INCREF(__pyx_v_wrap_backprop); __Pyx_GIVEREF(__pyx_v_wrap_backprop); PyTuple_SET_ITEM(__pyx_t_4, 1, 
__pyx_v_wrap_backprop); __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":130 * return backprop(gradient * mask, *args, **kwargs) * return finish_update * if inplace: # <<<<<<<<<<<<<< * x *= mask * return x, wrap_backprop */ } /* "thinc/neural/ops.pyx":134 * return x, wrap_backprop * else: * return x * mask, wrap_backprop # <<<<<<<<<<<<<< * * def flatten(self, X, dtype=None, pad=0): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_4 = PyNumber_Multiply(__pyx_v_x, __pyx_cur_scope->__pyx_v_mask); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_INCREF(__pyx_v_wrap_backprop); __Pyx_GIVEREF(__pyx_v_wrap_backprop); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_v_wrap_backprop); __pyx_t_4 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; } /* "thinc/neural/ops.pyx":120 * return masked, wrap_backprop * * def dropout(self, x, dropout, inplace=False): # <<<<<<<<<<<<<< * if dropout is None or dropout <= 0.0: * return x, lambda func: func */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("thinc.neural.ops.Ops.dropout", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_wrap_backprop); __Pyx_XDECREF(__pyx_v_x); __Pyx_DECREF(((PyObject *)__pyx_cur_scope)); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":136 * return x * mask, wrap_backprop * * def flatten(self, X, dtype=None, pad=0): # <<<<<<<<<<<<<< * if X is None or len(X) == 0: * return self.allocate((0,), dtype=dtype or 'f') */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_11flatten(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_11flatten = {"flatten", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_11flatten, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_11flatten(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_dtype = 0; PyObject *__pyx_v_pad = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("flatten (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_dtype,&__pyx_n_s_pad,0}; PyObject* values[4] = {0,0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_None)); values[3] = ((PyObject *)((PyObject *)__pyx_int_0)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) 
kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("flatten", 0, 2, 4, 1); __PYX_ERR(0, 136, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pad); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "flatten") < 0)) __PYX_ERR(0, 136, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = values[1]; __pyx_v_dtype = values[2]; __pyx_v_pad = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("flatten", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 136, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.flatten", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_10flatten(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_dtype, __pyx_v_pad); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_10flatten(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_dtype, PyObject *__pyx_v_pad) { PyObject *__pyx_v_xp = NULL; PyObject *__pyx_v_padded = NULL; PyObject *__pyx_v_x = NULL; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *(*__pyx_t_9)(PyObject *); PyObject *__pyx_t_10 = NULL; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__15) __Pyx_RefNannySetupContext("flatten", 0); __Pyx_TraceCall("flatten", __pyx_f[0], 136, 0, __PYX_ERR(0, 136, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_X); /* "thinc/neural/ops.pyx":137 * * def flatten(self, X, dtype=None, pad=0): * if X is None or len(X) == 0: # <<<<<<<<<<<<<< * return self.allocate((0,), dtype=dtype or 'f') * xp = get_array_module(X[0]) */ __pyx_t_2 = (__pyx_v_X == Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = PyObject_Length(__pyx_v_X); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 137, __pyx_L1_error) __pyx_t_3 = ((__pyx_t_4 == 0) != 0); __pyx_t_1 = __pyx_t_3; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "thinc/neural/ops.pyx":138 * def flatten(self, X, dtype=None, pad=0): * if X is None or len(X) == 0: * return self.allocate((0,), dtype=dtype or 'f') # <<<<<<<<<<<<<< * xp = get_array_module(X[0]) * X = [x for x in X if x.size != 0] */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = 
__Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 138, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 138, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_dtype); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 138, __pyx_L1_error) if (!__pyx_t_1) { } else { __Pyx_INCREF(__pyx_v_dtype); __pyx_t_7 = __pyx_v_dtype; goto __pyx_L6_bool_binop_done; } __Pyx_INCREF(__pyx_n_s_f); __pyx_t_7 = __pyx_n_s_f; __pyx_L6_bool_binop_done:; if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_t_7) < 0) __PYX_ERR(0, 138, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_tuple__17, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 138, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":137 * * def flatten(self, X, dtype=None, pad=0): * if X is None or len(X) == 0: # <<<<<<<<<<<<<< * return self.allocate((0,), dtype=dtype or 'f') * xp = get_array_module(X[0]) */ } /* "thinc/neural/ops.pyx":139 * if X is None or len(X) == 0: * return self.allocate((0,), dtype=dtype or 'f') * xp = get_array_module(X[0]) # <<<<<<<<<<<<<< * X = [x for x in X if x.size != 0] * if int(pad) >= 1: */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_get_array_module); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_X, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_7 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_8, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_5); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_xp = __pyx_t_7; __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":140 * return self.allocate((0,), dtype=dtype or 'f') * xp = get_array_module(X[0]) * X = [x for x in X if x.size != 0] # <<<<<<<<<<<<<< * if int(pad) >= 1: * padded = [] */ __pyx_t_7 = PyList_New(0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 140, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (likely(PyList_CheckExact(__pyx_v_X)) || PyTuple_CheckExact(__pyx_v_X)) { __pyx_t_6 = __pyx_v_X; __Pyx_INCREF(__pyx_t_6); __pyx_t_4 = 0; __pyx_t_9 = NULL; } else { __pyx_t_4 = -1; __pyx_t_6 = PyObject_GetIter(__pyx_v_X); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 140, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_9 = Py_TYPE(__pyx_t_6)->tp_iternext; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 140, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_9)) { if (likely(PyList_CheckExact(__pyx_t_6))) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_6)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_6, __pyx_t_4); __Pyx_INCREF(__pyx_t_5); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 140, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_6, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 140, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_6)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_6, __pyx_t_4); __Pyx_INCREF(__pyx_t_5); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 140, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_6, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 140, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_9(__pyx_t_6); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 140, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_x, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 140, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = __Pyx_PyInt_NeObjC(__pyx_t_5, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 140, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 140, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (__pyx_t_1) { if (unlikely(__Pyx_ListComp_Append(__pyx_t_7, (PyObject*)__pyx_v_x))) __PYX_ERR(0, 140, __pyx_L1_error) } } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_X, __pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":141 * xp = get_array_module(X[0]) * X = [x for x in X if x.size != 0] * if int(pad) >= 1: # <<<<<<<<<<<<<< * padded = [] * for x in X: */ __pyx_t_7 = __Pyx_PyNumber_Int(__pyx_v_pad); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 141, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_6 = PyObject_RichCompare(__pyx_t_7, __pyx_int_1, Py_GE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 141, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 
0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 141, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__pyx_t_1) { /* "thinc/neural/ops.pyx":142 * X = [x for x in X if x.size != 0] * if int(pad) >= 1: * padded = [] # <<<<<<<<<<<<<< * for x in X: * padded.append( */ __pyx_t_6 = PyList_New(0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_v_padded = ((PyObject*)__pyx_t_6); __pyx_t_6 = 0; /* "thinc/neural/ops.pyx":143 * if int(pad) >= 1: * padded = [] * for x in X: # <<<<<<<<<<<<<< * padded.append( * xp.zeros((pad,) + x.shape[1:], dtype=x.dtype)) */ if (likely(PyList_CheckExact(__pyx_v_X)) || PyTuple_CheckExact(__pyx_v_X)) { __pyx_t_6 = __pyx_v_X; __Pyx_INCREF(__pyx_t_6); __pyx_t_4 = 0; __pyx_t_9 = NULL; } else { __pyx_t_4 = -1; __pyx_t_6 = PyObject_GetIter(__pyx_v_X); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_9 = Py_TYPE(__pyx_t_6)->tp_iternext; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 143, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_9)) { if (likely(PyList_CheckExact(__pyx_t_6))) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_6)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_6, __pyx_t_4); __Pyx_INCREF(__pyx_t_7); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 143, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_6, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_6)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_6, __pyx_t_4); __Pyx_INCREF(__pyx_t_7); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 143, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_6, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_9(__pyx_t_6); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 143, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_x, __pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":145 * for x in X: * padded.append( * xp.zeros((pad,) + x.shape[1:], dtype=x.dtype)) # <<<<<<<<<<<<<< * padded.append(x) * padded.append( */ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_zeros); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 145, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 145, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_pad); __Pyx_GIVEREF(__pyx_v_pad); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_pad); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_shape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 145, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = __Pyx_PyObject_GetSlice(__pyx_t_5, 1, 0, NULL, NULL, &__pyx_slice__18, 1, 0, 1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 145, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyNumber_Add(__pyx_t_8, __pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 145, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 145, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 145, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 145, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_8) < 0) __PYX_ERR(0, 145, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_10, __pyx_t_5); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 145, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "thinc/neural/ops.pyx":144 * padded = [] * for x in X: * padded.append( # <<<<<<<<<<<<<< * xp.zeros((pad,) + x.shape[1:], dtype=x.dtype)) * padded.append(x) */ __pyx_t_11 = __Pyx_PyList_Append(__pyx_v_padded, __pyx_t_8); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(0, 144, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /* "thinc/neural/ops.pyx":146 * padded.append( * xp.zeros((pad,) + x.shape[1:], dtype=x.dtype)) * padded.append(x) # <<<<<<<<<<<<<< * padded.append( * xp.zeros((pad,) + x.shape[1:], dtype=x.dtype)) */ __pyx_t_11 = __Pyx_PyList_Append(__pyx_v_padded, __pyx_v_x); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(0, 146, __pyx_L1_error) /* "thinc/neural/ops.pyx":143 * if int(pad) >= 1: * padded = [] * for x in X: # <<<<<<<<<<<<<< * padded.append( * xp.zeros((pad,) + x.shape[1:], dtype=x.dtype)) */ } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "thinc/neural/ops.pyx":148 * padded.append(x) * padded.append( * xp.zeros((pad,) + x.shape[1:], dtype=x.dtype)) # <<<<<<<<<<<<<< * X = padded * result = xp.concatenate(X) */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_zeros); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_pad); __Pyx_GIVEREF(__pyx_v_pad); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_pad); if (unlikely(!__pyx_v_x)) { __Pyx_RaiseUnboundLocalError("x"); __PYX_ERR(0, 148, __pyx_L1_error) } __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_shape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = __Pyx_PyObject_GetSlice(__pyx_t_5, 1, 0, NULL, NULL, &__pyx_slice__18, 1, 0, 1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyNumber_Add(__pyx_t_8, __pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(!__pyx_v_x)) { __Pyx_RaiseUnboundLocalError("x"); __PYX_ERR(0, 148, __pyx_L1_error) } __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, 
__pyx_t_8) < 0) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_10, __pyx_t_5); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "thinc/neural/ops.pyx":147 * xp.zeros((pad,) + x.shape[1:], dtype=x.dtype)) * padded.append(x) * padded.append( # <<<<<<<<<<<<<< * xp.zeros((pad,) + x.shape[1:], dtype=x.dtype)) * X = padded */ __pyx_t_11 = __Pyx_PyList_Append(__pyx_v_padded, __pyx_t_8); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(0, 147, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /* "thinc/neural/ops.pyx":149 * padded.append( * xp.zeros((pad,) + x.shape[1:], dtype=x.dtype)) * X = padded # <<<<<<<<<<<<<< * result = xp.concatenate(X) * if dtype is not None: */ __Pyx_INCREF(__pyx_v_padded); __Pyx_DECREF_SET(__pyx_v_X, __pyx_v_padded); /* "thinc/neural/ops.pyx":141 * xp = get_array_module(X[0]) * X = [x for x in X if x.size != 0] * if int(pad) >= 1: # <<<<<<<<<<<<<< * padded = [] * for x in X: */ } /* "thinc/neural/ops.pyx":150 * xp.zeros((pad,) + x.shape[1:], dtype=x.dtype)) * X = padded * result = xp.concatenate(X) # <<<<<<<<<<<<<< * if dtype is not None: * result = xp.asarray(result, dtype=dtype) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_concatenate); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_10)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_10); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_8 = (__pyx_t_10) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_10, __pyx_v_X) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_X); __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_8; __pyx_t_8 = 0; /* "thinc/neural/ops.pyx":151 * X = padded * result = xp.concatenate(X) * if dtype is not None: # <<<<<<<<<<<<<< * result = xp.asarray(result, dtype=dtype) * return result */ __pyx_t_1 = (__pyx_v_dtype != Py_None); __pyx_t_3 = (__pyx_t_1 != 0); if (__pyx_t_3) { /* "thinc/neural/ops.pyx":152 * result = xp.concatenate(X) * if dtype is not None: * result = xp.asarray(result, dtype=dtype) # <<<<<<<<<<<<<< * return result * */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_asarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_result); __pyx_t_10 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (PyDict_SetItem(__pyx_t_10, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 152, __pyx_L1_error) __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_5, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; /* "thinc/neural/ops.pyx":151 * X = padded * result = xp.concatenate(X) * if dtype is not None: # <<<<<<<<<<<<<< * result = xp.asarray(result, dtype=dtype) * return result */ } /* "thinc/neural/ops.pyx":153 * if dtype is not None: * result = xp.asarray(result, dtype=dtype) * return result # <<<<<<<<<<<<<< * * def unflatten(self, X, lengths, pad=0): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "thinc/neural/ops.pyx":136 * return x * mask, wrap_backprop * * def flatten(self, X, dtype=None, pad=0): # <<<<<<<<<<<<<< * if X is None or len(X) == 0: * return self.allocate((0,), dtype=dtype or 'f') */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("thinc.neural.ops.Ops.flatten", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_xp); __Pyx_XDECREF(__pyx_v_padded); __Pyx_XDECREF(__pyx_v_x); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_X); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":155 * return result * * def unflatten(self, X, lengths, pad=0): # <<<<<<<<<<<<<< * unflat = [] * pad = int(pad) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_13unflatten(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_13unflatten = {"unflatten", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_13unflatten, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_13unflatten(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject 
*__pyx_v_self = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_lengths = 0; PyObject *__pyx_v_pad = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("unflatten (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_lengths,&__pyx_n_s_pad,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)__pyx_int_0)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("unflatten", 0, 3, 4, 1); __PYX_ERR(0, 155, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("unflatten", 0, 3, 4, 2); __PYX_ERR(0, 155, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pad); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "unflatten") < 0)) __PYX_ERR(0, 155, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = values[1]; __pyx_v_lengths = values[2]; __pyx_v_pad = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("unflatten", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 155, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.unflatten", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_12unflatten(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_lengths, __pyx_v_pad); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_12unflatten(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_lengths, PyObject *__pyx_v_pad) { PyObject *__pyx_v_unflat = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
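/* Editor's sketch (hand-written, not Cython output): `Ops.flatten` (compiled above) and
 * `Ops.unflatten` (compiled below) are inverses. With pad >= 1, flatten inserts a block of
 * zeros of shape (pad,) + x.shape[1:] before every sequence and once more after the last
 * one, then concatenates; unflatten walks `lengths`, skipping those pad rows and re-slicing
 * the flat array back into the original pieces. A minimal round-trip example, assuming
 * `ops` is an instance of a concrete Ops subclass (e.g. NumpyOps) and numpy inputs:
 *
 *     import numpy
 *     seqs = [numpy.ones((2, 4), dtype="f"), numpy.ones((3, 4), dtype="f")]
 *     flat = ops.flatten(seqs, pad=1)            # rows: 1 + 2 + 1 + 3 + 1 == 8
 *     assert flat.shape == (8, 4)
 *     back = ops.unflatten(flat, [2, 3], pad=1)  # drops the pad rows again
 *     assert [b.shape for b in back] == [(2, 4), (3, 4)]
 */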
__Pyx_TraceFrameInit(__pyx_codeobj__19) __Pyx_RefNannySetupContext("unflatten", 0); __Pyx_TraceCall("unflatten", __pyx_f[0], 155, 0, __PYX_ERR(0, 155, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_X); __Pyx_INCREF(__pyx_v_pad); /* "thinc/neural/ops.pyx":156 * * def unflatten(self, X, lengths, pad=0): * unflat = [] # <<<<<<<<<<<<<< * pad = int(pad) * for length in lengths: */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_unflat = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":157 * def unflatten(self, X, lengths, pad=0): * unflat = [] * pad = int(pad) # <<<<<<<<<<<<<< * for length in lengths: * length = int(length) */ __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_v_pad); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 157, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_pad, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":158 * unflat = [] * pad = int(pad) * for length in lengths: # <<<<<<<<<<<<<< * length = int(length) * if pad >= 1 and length != 0: */ if (likely(PyList_CheckExact(__pyx_v_lengths)) || PyTuple_CheckExact(__pyx_v_lengths)) { __pyx_t_1 = __pyx_v_lengths; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_lengths); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 158, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(0, 158, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(0, 158, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 158, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_4); } __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":159 * pad = int(pad) * for length in lengths: * length = int(length) # <<<<<<<<<<<<<< * if pad >= 1 and length != 0: * X = X[pad:] */ __pyx_t_4 = __Pyx_PyNumber_Int(__pyx_v_length); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_length, __pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":160 * for length in lengths: * length = int(length) * if pad >= 1 and length != 0: # <<<<<<<<<<<<<< * X = X[pad:] * unflat.append(X[:length]) */ __pyx_t_4 = PyObject_RichCompare(__pyx_v_pad, __pyx_int_1, Py_GE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 160, __pyx_L1_error) __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 
160, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { } else { __pyx_t_5 = __pyx_t_6; goto __pyx_L6_bool_binop_done; } __pyx_t_4 = __Pyx_PyInt_NeObjC(__pyx_v_length, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 160, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __pyx_t_6; __pyx_L6_bool_binop_done:; if (__pyx_t_5) { /* "thinc/neural/ops.pyx":161 * length = int(length) * if pad >= 1 and length != 0: * X = X[pad:] # <<<<<<<<<<<<<< * unflat.append(X[:length]) * X = X[length:] */ __pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_v_X, 0, 0, &__pyx_v_pad, NULL, NULL, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_X, __pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":160 * for length in lengths: * length = int(length) * if pad >= 1 and length != 0: # <<<<<<<<<<<<<< * X = X[pad:] * unflat.append(X[:length]) */ } /* "thinc/neural/ops.pyx":162 * if pad >= 1 and length != 0: * X = X[pad:] * unflat.append(X[:length]) # <<<<<<<<<<<<<< * X = X[length:] * if pad >= 1: */ __pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_v_X, 0, 0, NULL, &__pyx_v_length, NULL, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_unflat, __pyx_t_4); if (unlikely(__pyx_t_7 == ((int)-1))) __PYX_ERR(0, 162, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":163 * X = X[pad:] * unflat.append(X[:length]) * X = X[length:] # <<<<<<<<<<<<<< * if pad >= 1: * X = X[pad:] */ __pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_v_X, 0, 0, &__pyx_v_length, NULL, NULL, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 163, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_X, __pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":158 * unflat = [] * pad = int(pad) * for length in lengths: # <<<<<<<<<<<<<< * length = int(length) * if pad >= 1 and length != 0: */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":164 * unflat.append(X[:length]) * X = X[length:] * if pad >= 1: # <<<<<<<<<<<<<< * X = X[pad:] * assert len(X) == 0 */ __pyx_t_1 = PyObject_RichCompare(__pyx_v_pad, __pyx_int_1, Py_GE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 164, __pyx_L1_error) __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 164, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_5) { /* "thinc/neural/ops.pyx":165 * X = X[length:] * if pad >= 1: * X = X[pad:] # <<<<<<<<<<<<<< * assert len(X) == 0 * assert len(unflat) == len(lengths) */ __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_X, 0, 0, &__pyx_v_pad, NULL, NULL, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 165, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_X, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":164 * unflat.append(X[:length]) * X = X[length:] * if pad >= 1: # <<<<<<<<<<<<<< * X = X[pad:] * assert len(X) == 0 */ } /* "thinc/neural/ops.pyx":166 * if pad >= 1: * X = X[pad:] * assert len(X) == 0 # <<<<<<<<<<<<<< * assert len(unflat) == len(lengths) * return unflat */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_2 = PyObject_Length(__pyx_v_X); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 166, __pyx_L1_error) if (unlikely(!((__pyx_t_2 == 0) != 0))) { 
PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 166, __pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":167 * X = X[pad:] * assert len(X) == 0 * assert len(unflat) == len(lengths) # <<<<<<<<<<<<<< * return unflat * */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { __pyx_t_2 = PyList_GET_SIZE(__pyx_v_unflat); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 167, __pyx_L1_error) __pyx_t_8 = PyObject_Length(__pyx_v_lengths); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 167, __pyx_L1_error) if (unlikely(!((__pyx_t_2 == __pyx_t_8) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 167, __pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":168 * assert len(X) == 0 * assert len(unflat) == len(lengths) * return unflat # <<<<<<<<<<<<<< * * def square_sequences(self, seqs): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_unflat); __pyx_r = __pyx_v_unflat; goto __pyx_L0; /* "thinc/neural/ops.pyx":155 * return result * * def unflatten(self, X, lengths, pad=0): # <<<<<<<<<<<<<< * unflat = [] * pad = int(pad) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.Ops.unflatten", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_unflat); __Pyx_XDECREF(__pyx_v_length); __Pyx_XDECREF(__pyx_v_X); __Pyx_XDECREF(__pyx_v_pad); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":170 * return unflat * * def square_sequences(self, seqs): # <<<<<<<<<<<<<< * '''Sort a batch of sequence by decreasing length, pad, and transpose * so that the outer dimension is the timestep. Return the padded batch, */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_15square_sequences(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_5thinc_6neural_3ops_3Ops_14square_sequences[] = "Sort a batch of sequence by decreasing length, pad, and transpose\n so that the outer dimension is the timestep. 
Return the padded batch,\n along with an array indicating the actual length at each step, and a callback\n to reverse the transformation.\n "; static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_15square_sequences = {"square_sequences", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_15square_sequences, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5thinc_6neural_3ops_3Ops_14square_sequences}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_15square_sequences(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_seqs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("square_sequences (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_seqs,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seqs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("square_sequences", 1, 2, 2, 1); __PYX_ERR(0, 170, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "square_sequences") < 0)) __PYX_ERR(0, 170, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_seqs = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("square_sequences", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 170, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.square_sequences", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_14square_sequences(__pyx_self, __pyx_v_self, __pyx_v_seqs); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":197 * break * batch_size_at_t[t] = i * def unpad(padded): # <<<<<<<<<<<<<< * unpadded = [None] * len(lengths) * padded = self.xp.ascontiguousarray(padded.transpose((1, 0) + extra_dims)) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_16square_sequences_1unpad(PyObject *__pyx_self, PyObject *__pyx_v_padded); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_16square_sequences_1unpad = {"unpad", (PyCFunction)__pyx_pw_5thinc_6neural_3ops_3Ops_16square_sequences_1unpad, METH_O, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_16square_sequences_1unpad(PyObject *__pyx_self, PyObject *__pyx_v_padded) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("unpad (wrapper)", 0); __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_16square_sequences_unpad(__pyx_self, ((PyObject *)__pyx_v_padded)); /* function exit 
code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_16square_sequences_unpad(PyObject *__pyx_self, PyObject *__pyx_v_padded) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *__pyx_cur_scope; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *__pyx_outer_scope; PyObject *__pyx_v_unpadded = NULL; PyObject *__pyx_v_i = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *(*__pyx_t_8)(PyObject *); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("unpad", 0); __pyx_outer_scope = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_TraceCall("unpad", __pyx_f[0], 197, 0, __PYX_ERR(0, 197, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_padded); /* "thinc/neural/ops.pyx":198 * batch_size_at_t[t] = i * def unpad(padded): * unpadded = [None] * len(lengths) # <<<<<<<<<<<<<< * padded = self.xp.ascontiguousarray(padded.transpose((1, 0) + extra_dims)) * for i in range(padded.shape[0]): */ if (unlikely(!__pyx_cur_scope->__pyx_v_lengths)) { __Pyx_RaiseClosureNameError("lengths"); __PYX_ERR(0, 198, __pyx_L1_error) } __pyx_t_1 = __pyx_cur_scope->__pyx_v_lengths; __Pyx_INCREF(__pyx_t_1); if (unlikely(__pyx_t_1 == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(0, 198, __pyx_L1_error) } __pyx_t_2 = PyList_GET_SIZE(__pyx_t_1); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 198, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyList_New(1 * ((__pyx_t_2<0) ? 
0:__pyx_t_2)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_t_2; __pyx_temp++) { __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyList_SET_ITEM(__pyx_t_1, __pyx_temp, Py_None); } } __pyx_v_unpadded = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":199 * def unpad(padded): * unpadded = [None] * len(lengths) * padded = self.xp.ascontiguousarray(padded.transpose((1, 0) + extra_dims)) # <<<<<<<<<<<<<< * for i in range(padded.shape[0]): * unpadded[indices[i]] = padded[i, :lengths[i]] */ if (unlikely(!__pyx_cur_scope->__pyx_v_self)) { __Pyx_RaiseClosureNameError("self"); __PYX_ERR(0, 199, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_ascontiguousarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_padded, __pyx_n_s_transpose); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(!__pyx_cur_scope->__pyx_v_extra_dims)) { __Pyx_RaiseClosureNameError("extra_dims"); __PYX_ERR(0, 199, __pyx_L1_error) } __pyx_t_6 = PyNumber_Add(__pyx_tuple__20, __pyx_cur_scope->__pyx_v_extra_dims); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_7, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_1 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF_SET(__pyx_v_padded, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":200 * unpadded = [None] * len(lengths) * padded = self.xp.ascontiguousarray(padded.transpose((1, 0) + extra_dims)) * for i in range(padded.shape[0]): # <<<<<<<<<<<<<< * unpadded[indices[i]] = padded[i, :lengths[i]] * return unpadded */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_padded, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (likely(PyList_CheckExact(__pyx_t_1)) || PyTuple_CheckExact(__pyx_t_1)) { __pyx_t_4 = __pyx_t_1; __Pyx_INCREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_8 = NULL; } else { __pyx_t_2 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 200, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_2); __Pyx_INCREF(__pyx_t_1); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(0, 200, __pyx_L1_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_4, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif } else { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_2); __Pyx_INCREF(__pyx_t_1); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(0, 200, __pyx_L1_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_4, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif } } else { __pyx_t_1 = __pyx_t_8(__pyx_t_4); if (unlikely(!__pyx_t_1)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 200, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_1); } __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":201 * padded = self.xp.ascontiguousarray(padded.transpose((1, 0) + extra_dims)) * for i in range(padded.shape[0]): * unpadded[indices[i]] = padded[i, :lengths[i]] # <<<<<<<<<<<<<< * return unpadded * return arr, batch_size_at_t, unpad */ if (unlikely(!__pyx_cur_scope->__pyx_v_lengths)) { __Pyx_RaiseClosureNameError("lengths"); __PYX_ERR(0, 201, __pyx_L1_error) } if (unlikely(__pyx_cur_scope->__pyx_v_lengths == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(0, 201, __pyx_L1_error) } __pyx_t_1 = 
__Pyx_PyObject_GetItem(__pyx_cur_scope->__pyx_v_lengths, __pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PySlice_New(Py_None, __pyx_t_1, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_i); __Pyx_GIVEREF(__pyx_v_i); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_padded, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_cur_scope->__pyx_v_indices)) { __Pyx_RaiseClosureNameError("indices"); __PYX_ERR(0, 201, __pyx_L1_error) } if (unlikely(__pyx_cur_scope->__pyx_v_indices == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(0, 201, __pyx_L1_error) } __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_cur_scope->__pyx_v_indices, __pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_v_unpadded, __pyx_t_1, __pyx_t_3) < 0)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":200 * unpadded = [None] * len(lengths) * padded = self.xp.ascontiguousarray(padded.transpose((1, 0) + extra_dims)) * for i in range(padded.shape[0]): # <<<<<<<<<<<<<< * unpadded[indices[i]] = padded[i, :lengths[i]] * return unpadded */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":202 * for i in range(padded.shape[0]): * unpadded[indices[i]] = padded[i, :lengths[i]] * return unpadded # <<<<<<<<<<<<<< * return arr, batch_size_at_t, unpad * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_unpadded); __pyx_r = __pyx_v_unpadded; goto __pyx_L0; /* "thinc/neural/ops.pyx":197 * break * batch_size_at_t[t] = i * def unpad(padded): # <<<<<<<<<<<<<< * unpadded = [None] * len(lengths) * padded = self.xp.ascontiguousarray(padded.transpose((1, 0) + extra_dims)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.Ops.square_sequences.unpad", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_unpadded); __Pyx_XDECREF(__pyx_v_i); __Pyx_XDECREF(__pyx_v_padded); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":170 * return unflat * * def square_sequences(self, seqs): # <<<<<<<<<<<<<< * '''Sort a batch of sequence by decreasing length, pad, and transpose * so that the outer dimension is the timestep. 
Return the padded batch, */ static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_14square_sequences(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_seqs) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *__pyx_cur_scope; PyObject *__pyx_v_lengths_indices = NULL; Py_ssize_t __pyx_v_nB; PyObject *__pyx_v_nS = NULL; PyObject *__pyx_v_arr = NULL; PyObject *__pyx_v_arr_i = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_v_seqs_i = NULL; PyObject *__pyx_v_batch_size_at_t = NULL; PyObject *__pyx_v_i = NULL; PyObject *__pyx_v_t = NULL; PyObject *__pyx_v_unpad = 0; PyObject *__pyx_v_seq = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t __pyx_t_4; PyObject *(*__pyx_t_5)(PyObject *); PyObject *__pyx_t_6 = NULL; Py_ssize_t __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *(*__pyx_t_10)(PyObject *); int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__21) __Pyx_RefNannySetupContext("square_sequences", 0); __pyx_cur_scope = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *)__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences(__pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 170, __pyx_L1_error) } else { __Pyx_GOTREF(__pyx_cur_scope); } __Pyx_TraceCall("square_sequences", __pyx_f[0], 170, 0, __PYX_ERR(0, 170, __pyx_L1_error)); __pyx_cur_scope->__pyx_v_self = __pyx_v_self; __Pyx_INCREF(__pyx_cur_scope->__pyx_v_self); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_self); /* "thinc/neural/ops.pyx":176 * to reverse the transformation. 
* ''' * lengths_indices = [(len(seq), i) for i, seq in enumerate(seqs)] # <<<<<<<<<<<<<< * lengths_indices.sort(reverse=True) * indices = [i for length, i in lengths_indices] */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_int_0); __pyx_t_2 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_seqs)) || PyTuple_CheckExact(__pyx_v_seqs)) { __pyx_t_3 = __pyx_v_seqs; __Pyx_INCREF(__pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = NULL; } else { __pyx_t_4 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_seqs); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 176, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_5)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_6 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_6); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 176, __pyx_L1_error) #else __pyx_t_6 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); #endif } else { if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_6); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 176, __pyx_L1_error) #else __pyx_t_6 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); #endif } } else { __pyx_t_6 = __pyx_t_5(__pyx_t_3); if (unlikely(!__pyx_t_6)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 176, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_6); } __Pyx_XDECREF_SET(__pyx_v_seq, __pyx_t_6); __pyx_t_6 = 0; __Pyx_INCREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_2); __pyx_t_6 = __Pyx_PyInt_AddObjC(__pyx_t_2, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = __pyx_t_6; __pyx_t_6 = 0; __pyx_t_7 = PyObject_Length(__pyx_v_seq); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 176, __pyx_L1_error) __pyx_t_6 = PyInt_FromSsize_t(__pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __Pyx_INCREF(__pyx_v_i); __Pyx_GIVEREF(__pyx_v_i); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_v_i); __pyx_t_6 = 0; if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_8))) __PYX_ERR(0, 176, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_lengths_indices = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":177 * ''' * lengths_indices = [(len(seq), i) for i, seq in enumerate(seqs)] * lengths_indices.sort(reverse=True) # <<<<<<<<<<<<<< * indices = [i for length, i in lengths_indices] * lengths = [length for length, i in lengths_indices] */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_lengths_indices, __pyx_n_s_sort); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 177, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_reverse, Py_True) < 0) __PYX_ERR(0, 177, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":178 * lengths_indices = [(len(seq), i) for i, seq in enumerate(seqs)] * lengths_indices.sort(reverse=True) * indices = [i for length, i in lengths_indices] # <<<<<<<<<<<<<< * lengths = [length for length, i in lengths_indices] * nB = len(seqs) */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __pyx_v_lengths_indices; __Pyx_INCREF(__pyx_t_2); __pyx_t_4 = 0; for (;;) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_1); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 178, __pyx_L1_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { PyObject* sequence = __pyx_t_1; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 178, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_8 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_6 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_8 = PyList_GET_ITEM(sequence, 0); __pyx_t_6 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(__pyx_t_6); #else __pyx_t_8 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { Py_ssize_t index = -1; __pyx_t_9 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_10 = Py_TYPE(__pyx_t_9)->tp_iternext; index = 0; __pyx_t_8 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_8)) goto __pyx_L7_unpacking_failed; __Pyx_GOTREF(__pyx_t_8); index = 1; __pyx_t_6 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_6)) goto __pyx_L7_unpacking_failed; __Pyx_GOTREF(__pyx_t_6); if (__Pyx_IternextUnpackEndCheck(__pyx_t_10(__pyx_t_9), 2) < 0) __PYX_ERR(0, 178, __pyx_L1_error) __pyx_t_10 = NULL; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L8_unpacking_done; __pyx_L7_unpacking_failed:; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_10 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 178, __pyx_L1_error) __pyx_L8_unpacking_done:; } __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6); __pyx_t_6 = 0; if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_v_i))) __PYX_ERR(0, 178, __pyx_L1_error) } 
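/* Editor's sketch (hand-written, not Cython output): `Ops.square_sequences`, whose body is
 * compiled in this function, sorts the batch by decreasing length, packs it into a single
 * padded array transposed to timestep-major order, and returns that array together with
 * the number of sequences still active at each timestep and an `unpad` callback that
 * restores the original order and lengths. A small illustration, assuming `ops` is an
 * instance of a concrete Ops subclass (e.g. NumpyOps) and numpy inputs:
 *
 *     import numpy
 *     seqs = [numpy.ones((n, 4), dtype="f") for n in (3, 5, 2)]
 *     arr, batch_size_at_t, unpad = ops.square_sequences(seqs)
 *     assert arr.shape == (5, 3, 4)                    # (nS, nB, width): timestep-major
 *     assert list(batch_size_at_t) == [3, 3, 2, 1, 1]  # sequences still running at step t
 *     restored = unpad(arr)
 *     assert [r.shape[0] for r in restored] == [3, 5, 2]
 */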
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GIVEREF(__pyx_t_3); __pyx_cur_scope->__pyx_v_indices = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":179 * lengths_indices.sort(reverse=True) * indices = [i for length, i in lengths_indices] * lengths = [length for length, i in lengths_indices] # <<<<<<<<<<<<<< * nB = len(seqs) * nS = max([len(seq) for seq in seqs]) */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __pyx_v_lengths_indices; __Pyx_INCREF(__pyx_t_2); __pyx_t_4 = 0; for (;;) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_1); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 179, __pyx_L1_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { PyObject* sequence = __pyx_t_1; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 179, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_6 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_8 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_6 = PyList_GET_ITEM(sequence, 0); __pyx_t_8 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(__pyx_t_8); #else __pyx_t_6 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { Py_ssize_t index = -1; __pyx_t_9 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_10 = Py_TYPE(__pyx_t_9)->tp_iternext; index = 0; __pyx_t_6 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_6)) goto __pyx_L11_unpacking_failed; __Pyx_GOTREF(__pyx_t_6); index = 1; __pyx_t_8 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_8)) goto __pyx_L11_unpacking_failed; __Pyx_GOTREF(__pyx_t_8); if (__Pyx_IternextUnpackEndCheck(__pyx_t_10(__pyx_t_9), 2) < 0) __PYX_ERR(0, 179, __pyx_L1_error) __pyx_t_10 = NULL; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L12_unpacking_done; __pyx_L11_unpacking_failed:; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_10 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 179, __pyx_L1_error) __pyx_L12_unpacking_done:; } __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_8); __pyx_t_8 = 0; if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_v_length))) __PYX_ERR(0, 179, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GIVEREF(__pyx_t_3); __pyx_cur_scope->__pyx_v_lengths = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":180 * indices = [i for length, i in lengths_indices] * lengths = [length for length, i in lengths_indices] * nB = len(seqs) # <<<<<<<<<<<<<< * nS = max([len(seq) for seq in seqs]) * arr = self.allocate((nB, nS) + seqs[0].shape[1:], dtype=seqs[0].dtype) */ __pyx_t_4 = 
PyObject_Length(__pyx_v_seqs); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 180, __pyx_L1_error) __pyx_v_nB = __pyx_t_4; /* "thinc/neural/ops.pyx":181 * lengths = [length for length, i in lengths_indices] * nB = len(seqs) * nS = max([len(seq) for seq in seqs]) # <<<<<<<<<<<<<< * arr = self.allocate((nB, nS) + seqs[0].shape[1:], dtype=seqs[0].dtype) * for arr_i, (length, seqs_i) in enumerate(lengths_indices): */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(PyList_CheckExact(__pyx_v_seqs)) || PyTuple_CheckExact(__pyx_v_seqs)) { __pyx_t_2 = __pyx_v_seqs; __Pyx_INCREF(__pyx_t_2); __pyx_t_4 = 0; __pyx_t_5 = NULL; } else { __pyx_t_4 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_seqs); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 181, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_5)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_1); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 181, __pyx_L1_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif } else { if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_1); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 181, __pyx_L1_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif } } else { __pyx_t_1 = __pyx_t_5(__pyx_t_2); if (unlikely(!__pyx_t_1)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 181, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_1); } __Pyx_XDECREF_SET(__pyx_v_seq, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_7 = PyObject_Length(__pyx_v_seq); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 181, __pyx_L1_error) __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_7); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_1))) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_max, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_nS = __pyx_t_2; __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":182 * nB = len(seqs) * nS = max([len(seq) for seq in seqs]) * arr = self.allocate((nB, nS) + seqs[0].shape[1:], dtype=seqs[0].dtype) # <<<<<<<<<<<<<< * for arr_i, (length, seqs_i) in enumerate(lengths_indices): * arr[arr_i, :length] = self.asarray(seqs[seqs_i]) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_nB); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyTuple_New(2); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __Pyx_INCREF(__pyx_v_nS); __Pyx_GIVEREF(__pyx_v_nS); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_nS); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_seqs, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_shape); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_t_8, 1, 0, NULL, NULL, &__pyx_slice__18, 1, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_Add(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_seqs, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_8, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_v_arr = __pyx_t_6; __pyx_t_6 = 0; /* "thinc/neural/ops.pyx":183 * nS = max([len(seq) for seq in seqs]) * arr = self.allocate((nB, nS) + seqs[0].shape[1:], dtype=seqs[0].dtype) * for arr_i, (length, seqs_i) in enumerate(lengths_indices): # <<<<<<<<<<<<<< * arr[arr_i, :length] = self.asarray(seqs[seqs_i]) * extra_dims = tuple(range(2, len(arr.shape))) */ __Pyx_INCREF(__pyx_int_0); __pyx_t_6 = __pyx_int_0; __pyx_t_8 = __pyx_v_lengths_indices; __Pyx_INCREF(__pyx_t_8); __pyx_t_4 = 0; for (;;) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_8)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyList_GET_ITEM(__pyx_t_8, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 183, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_8, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 183, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_2 = PyTuple_GET_ITEM(sequence, 
0); __pyx_t_1 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_2 = PyList_GET_ITEM(sequence, 0); __pyx_t_1 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_1); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { Py_ssize_t index = -1; __pyx_t_9 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_10 = Py_TYPE(__pyx_t_9)->tp_iternext; index = 0; __pyx_t_2 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_2)) goto __pyx_L17_unpacking_failed; __Pyx_GOTREF(__pyx_t_2); index = 1; __pyx_t_1 = __pyx_t_10(__pyx_t_9); if (unlikely(!__pyx_t_1)) goto __pyx_L17_unpacking_failed; __Pyx_GOTREF(__pyx_t_1); if (__Pyx_IternextUnpackEndCheck(__pyx_t_10(__pyx_t_9), 2) < 0) __PYX_ERR(0, 183, __pyx_L1_error) __pyx_t_10 = NULL; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L18_unpacking_done; __pyx_L17_unpacking_failed:; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_10 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 183, __pyx_L1_error) __pyx_L18_unpacking_done:; } __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF_SET(__pyx_v_seqs_i, __pyx_t_1); __pyx_t_1 = 0; __Pyx_INCREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_arr_i, __pyx_t_6); __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_t_6, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = __pyx_t_3; __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":184 * arr = self.allocate((nB, nS) + seqs[0].shape[1:], dtype=seqs[0].dtype) * for arr_i, (length, seqs_i) in enumerate(lengths_indices): * arr[arr_i, :length] = self.asarray(seqs[seqs_i]) # <<<<<<<<<<<<<< * extra_dims = tuple(range(2, len(arr.shape))) * arr = self.xp.ascontiguousarray(arr.transpose((1, 0) + extra_dims)) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_self, __pyx_n_s_asarray); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_seqs, __pyx_v_seqs_i); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_3 = (__pyx_t_9) ? 
__Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_9, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PySlice_New(Py_None, __pyx_v_length, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_arr_i); __Pyx_GIVEREF(__pyx_v_arr_i); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_arr_i); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); __pyx_t_1 = 0; if (unlikely(PyObject_SetItem(__pyx_v_arr, __pyx_t_2, __pyx_t_3) < 0)) __PYX_ERR(0, 184, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":183 * nS = max([len(seq) for seq in seqs]) * arr = self.allocate((nB, nS) + seqs[0].shape[1:], dtype=seqs[0].dtype) * for arr_i, (length, seqs_i) in enumerate(lengths_indices): # <<<<<<<<<<<<<< * arr[arr_i, :length] = self.asarray(seqs[seqs_i]) * extra_dims = tuple(range(2, len(arr.shape))) */ } __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "thinc/neural/ops.pyx":185 * for arr_i, (length, seqs_i) in enumerate(lengths_indices): * arr[arr_i, :length] = self.asarray(seqs[seqs_i]) * extra_dims = tuple(range(2, len(arr.shape))) # <<<<<<<<<<<<<< * arr = self.xp.ascontiguousarray(arr.transpose((1, 0) + extra_dims)) * # Build a lookup table so we can find how big the batch is at point t. */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arr, __pyx_n_s_shape); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = PyObject_Length(__pyx_t_6); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyInt_FromSsize_t(__pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_int_2); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_8, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PySequence_Tuple(__pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GIVEREF(__pyx_t_8); __pyx_cur_scope->__pyx_v_extra_dims = ((PyObject*)__pyx_t_8); __pyx_t_8 = 0; /* "thinc/neural/ops.pyx":186 * arr[arr_i, :length] = self.asarray(seqs[seqs_i]) * extra_dims = tuple(range(2, len(arr.shape))) * arr = self.xp.ascontiguousarray(arr.transpose((1, 0) + extra_dims)) # <<<<<<<<<<<<<< * # Build a lookup table so we can find how big the batch is at point t. 
* batch_size_at_t = self.allocate((nS,), dtype='i') */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_ascontiguousarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_arr, __pyx_n_s_transpose); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyNumber_Add(__pyx_tuple__20, __pyx_cur_scope->__pyx_v_extra_dims); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_6 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_9, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_1); __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_8 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_6); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_arr, __pyx_t_8); __pyx_t_8 = 0; /* "thinc/neural/ops.pyx":188 * arr = self.xp.ascontiguousarray(arr.transpose((1, 0) + extra_dims)) * # Build a lookup table so we can find how big the batch is at point t. 
* batch_size_at_t = self.allocate((nS,), dtype='i') # <<<<<<<<<<<<<< * batch_size_at_t += 1 * i = len(lengths) */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_nS); __Pyx_GIVEREF(__pyx_v_nS); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_nS); __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_n_s_i) < 0) __PYX_ERR(0, 188, __pyx_L1_error) __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_6, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_batch_size_at_t = __pyx_t_2; __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":189 * # Build a lookup table so we can find how big the batch is at point t. * batch_size_at_t = self.allocate((nS,), dtype='i') * batch_size_at_t += 1 # <<<<<<<<<<<<<< * i = len(lengths) * for t in range(nS): */ __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_v_batch_size_at_t, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 189, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF_SET(__pyx_v_batch_size_at_t, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":190 * batch_size_at_t = self.allocate((nS,), dtype='i') * batch_size_at_t += 1 * i = len(lengths) # <<<<<<<<<<<<<< * for t in range(nS): * if t == lengths[i-1]: */ __pyx_t_2 = __pyx_cur_scope->__pyx_v_lengths; __Pyx_INCREF(__pyx_t_2); if (unlikely(__pyx_t_2 == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(0, 190, __pyx_L1_error) } __pyx_t_4 = PyList_GET_SIZE(__pyx_t_2); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyInt_FromSsize_t(__pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":191 * batch_size_at_t += 1 * i = len(lengths) * for t in range(nS): # <<<<<<<<<<<<<< * if t == lengths[i-1]: * i -= 1 */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_v_nS); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 191, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(PyList_CheckExact(__pyx_t_2)) || PyTuple_CheckExact(__pyx_t_2)) { __pyx_t_3 = __pyx_t_2; __Pyx_INCREF(__pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = NULL; } else { __pyx_t_4 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 191, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 191, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; for (;;) { if (likely(!__pyx_t_5)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 191, 
__pyx_L1_error) #else __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 191, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } else { if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 191, __pyx_L1_error) #else __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 191, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } } else { __pyx_t_2 = __pyx_t_5(__pyx_t_3); if (unlikely(!__pyx_t_2)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 191, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_2); } __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":192 * i = len(lengths) * for t in range(nS): * if t == lengths[i-1]: # <<<<<<<<<<<<<< * i -= 1 * if i == 0: */ __pyx_t_2 = __Pyx_PyInt_SubtractObjC(__pyx_v_i, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_cur_scope->__pyx_v_lengths, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_RichCompare(__pyx_v_t, __pyx_t_6, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_11) { /* "thinc/neural/ops.pyx":193 * for t in range(nS): * if t == lengths[i-1]: * i -= 1 # <<<<<<<<<<<<<< * if i == 0: * break */ __pyx_t_2 = __Pyx_PyInt_SubtractObjC(__pyx_v_i, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 193, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF_SET(__pyx_v_i, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":194 * if t == lengths[i-1]: * i -= 1 * if i == 0: # <<<<<<<<<<<<<< * break * batch_size_at_t[t] = i */ __pyx_t_2 = __Pyx_PyInt_EqObjC(__pyx_v_i, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 194, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 194, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_11) { /* "thinc/neural/ops.pyx":195 * i -= 1 * if i == 0: * break # <<<<<<<<<<<<<< * batch_size_at_t[t] = i * def unpad(padded): */ goto __pyx_L20_break; /* "thinc/neural/ops.pyx":194 * if t == lengths[i-1]: * i -= 1 * if i == 0: # <<<<<<<<<<<<<< * break * batch_size_at_t[t] = i */ } /* "thinc/neural/ops.pyx":192 * i = len(lengths) * for t in range(nS): * if t == lengths[i-1]: # <<<<<<<<<<<<<< * i -= 1 * if i == 0: */ } /* "thinc/neural/ops.pyx":196 * if i == 0: * break * batch_size_at_t[t] = i # <<<<<<<<<<<<<< * def unpad(padded): * unpadded = [None] * len(lengths) */ if (unlikely(PyObject_SetItem(__pyx_v_batch_size_at_t, __pyx_v_t, __pyx_v_i) < 0)) __PYX_ERR(0, 196, __pyx_L1_error) /* "thinc/neural/ops.pyx":191 * batch_size_at_t += 1 * i = len(lengths) * for t in range(nS): # <<<<<<<<<<<<<< * if t == lengths[i-1]: * i -= 1 */ } __pyx_L20_break:; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":197 * break * batch_size_at_t[t] 
= i * def unpad(padded): # <<<<<<<<<<<<<< * unpadded = [None] * len(lengths) * padded = self.xp.ascontiguousarray(padded.transpose((1, 0) + extra_dims)) */ __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_16square_sequences_1unpad, 0, __pyx_n_s_Ops_square_sequences_locals_unpa, ((PyObject*)__pyx_cur_scope), __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__23)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 197, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_unpad = __pyx_t_3; __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":203 * unpadded[indices[i]] = padded[i, :lengths[i]] * return unpadded * return arr, batch_size_at_t, unpad # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_arr); __Pyx_GIVEREF(__pyx_v_arr); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_arr); __Pyx_INCREF(__pyx_v_batch_size_at_t); __Pyx_GIVEREF(__pyx_v_batch_size_at_t); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_batch_size_at_t); __Pyx_INCREF(__pyx_v_unpad); __Pyx_GIVEREF(__pyx_v_unpad); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_unpad); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":170 * return unflat * * def square_sequences(self, seqs): # <<<<<<<<<<<<<< * '''Sort a batch of sequence by decreasing length, pad, and transpose * so that the outer dimension is the timestep. Return the padded batch, */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("thinc.neural.ops.Ops.square_sequences", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_lengths_indices); __Pyx_XDECREF(__pyx_v_nS); __Pyx_XDECREF(__pyx_v_arr); __Pyx_XDECREF(__pyx_v_arr_i); __Pyx_XDECREF(__pyx_v_length); __Pyx_XDECREF(__pyx_v_seqs_i); __Pyx_XDECREF(__pyx_v_batch_size_at_t); __Pyx_XDECREF(__pyx_v_i); __Pyx_XDECREF(__pyx_v_t); __Pyx_XDECREF(__pyx_v_unpad); __Pyx_XDECREF(__pyx_v_seq); __Pyx_DECREF(((PyObject *)__pyx_cur_scope)); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":207 * @cython.boundscheck(False) * @cython.wraparound(False) * def get_dropout_mask(self, shape, drop): # <<<<<<<<<<<<<< * if drop is None or drop <= 0: * return None */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_17get_dropout_mask(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_17get_dropout_mask = {"get_dropout_mask", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_17get_dropout_mask, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_17get_dropout_mask(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_shape = 0; PyObject *__pyx_v_drop = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("get_dropout_mask (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_shape,&__pyx_n_s_drop,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch 
(pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("get_dropout_mask", 1, 3, 3, 1); __PYX_ERR(0, 207, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_drop)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("get_dropout_mask", 1, 3, 3, 2); __PYX_ERR(0, 207, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "get_dropout_mask") < 0)) __PYX_ERR(0, 207, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_shape = values[1]; __pyx_v_drop = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("get_dropout_mask", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 207, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.get_dropout_mask", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_16get_dropout_mask(__pyx_self, __pyx_v_self, __pyx_v_shape, __pyx_v_drop); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_16get_dropout_mask(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_shape, PyObject *__pyx_v_drop) { PyObject *__pyx_v_coinflips = NULL; PyObject *__pyx_v_mask = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__24) __Pyx_RefNannySetupContext("get_dropout_mask", 0); __Pyx_TraceCall("get_dropout_mask", __pyx_f[0], 207, 0, __PYX_ERR(0, 207, __pyx_L1_error)); /* "thinc/neural/ops.pyx":208 * @cython.wraparound(False) * def get_dropout_mask(self, shape, drop): * if drop is None or drop <= 0: # <<<<<<<<<<<<<< * return None * elif drop >= 1.: */ __pyx_t_2 = (__pyx_v_drop == Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = PyObject_RichCompare(__pyx_v_drop, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 208, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 208, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __pyx_t_3; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "thinc/neural/ops.pyx":209 * def get_dropout_mask(self, shape, drop): * if 
drop is None or drop <= 0: * return None # <<<<<<<<<<<<<< * elif drop >= 1.: * return self.allocate(shape) */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "thinc/neural/ops.pyx":208 * @cython.wraparound(False) * def get_dropout_mask(self, shape, drop): * if drop is None or drop <= 0: # <<<<<<<<<<<<<< * return None * elif drop >= 1.: */ } /* "thinc/neural/ops.pyx":210 * if drop is None or drop <= 0: * return None * elif drop >= 1.: # <<<<<<<<<<<<<< * return self.allocate(shape) * coinflips = self.xp.random.uniform(0., 1., shape) */ __pyx_t_4 = PyObject_RichCompare(__pyx_v_drop, __pyx_float_1_, Py_GE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 210, __pyx_L1_error) __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 210, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_1) { /* "thinc/neural/ops.pyx":211 * return None * elif drop >= 1.: * return self.allocate(shape) # <<<<<<<<<<<<<< * coinflips = self.xp.random.uniform(0., 1., shape) * mask = (coinflips >= drop) / (1.-drop) */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 211, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_4 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_v_shape) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_shape); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 211, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":210 * if drop is None or drop <= 0: * return None * elif drop >= 1.: # <<<<<<<<<<<<<< * return self.allocate(shape) * coinflips = self.xp.random.uniform(0., 1., shape) */ } /* "thinc/neural/ops.pyx":212 * elif drop >= 1.: * return self.allocate(shape) * coinflips = self.xp.random.uniform(0., 1., shape) # <<<<<<<<<<<<<< * mask = (coinflips >= drop) / (1.-drop) * return self.asarray(mask, dtype='float32') */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_random); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_uniform); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[4] = {__pyx_t_6, __pyx_float_0_, __pyx_float_1_, __pyx_v_shape}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_7, 3+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 212, __pyx_L1_error) 
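/* Call-dispatch note for `self.xp.random.uniform(0., 1., shape)`: the method is
 * invoked through Cython's tiered call helpers.  If the bound method was unpacked
 * above (__pyx_t_7 == 1), the extracted `self` sits in __pyx_temp[0] and is
 * included by shifting the argument array one slot left and passing one extra
 * argument.  The fastcall helpers pass arguments as a plain C array (no temporary
 * tuple); the generic fallback below builds an argument tuple instead.  The
 * resulting `coinflips` array feeds the inverted-dropout expression from the .pyx
 * source, mask = (coinflips >= drop) / (1. - drop), i.e. kept activations are
 * rescaled by 1/(1 - drop). */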
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[4] = {__pyx_t_6, __pyx_float_0_, __pyx_float_1_, __pyx_v_shape}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_7, 3+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif { __pyx_t_8 = PyTuple_New(3+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_float_0_); __Pyx_GIVEREF(__pyx_float_0_); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_float_0_); __Pyx_INCREF(__pyx_float_1_); __Pyx_GIVEREF(__pyx_float_1_); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_float_1_); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_8, 2+__pyx_t_7, __pyx_v_shape); __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_coinflips = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":213 * return self.allocate(shape) * coinflips = self.xp.random.uniform(0., 1., shape) * mask = (coinflips >= drop) / (1.-drop) # <<<<<<<<<<<<<< * return self.asarray(mask, dtype='float32') * */ __pyx_t_4 = PyObject_RichCompare(__pyx_v_coinflips, __pyx_v_drop, Py_GE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 213, __pyx_L1_error) __pyx_t_5 = __Pyx_PyFloat_SubtractCObj(__pyx_float_1_, __pyx_v_drop, 1., 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 213, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = __Pyx_PyNumber_Divide(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 213, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_mask = __pyx_t_8; __pyx_t_8 = 0; /* "thinc/neural/ops.pyx":214 * coinflips = self.xp.random.uniform(0., 1., shape) * mask = (coinflips >= drop) / (1.-drop) * return self.asarray(mask, dtype='float32') # <<<<<<<<<<<<<< * * def allocate(self, shape, dtype='float32'): */ __Pyx_XDECREF(__pyx_r); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_asarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 214, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 214, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_mask); __Pyx_GIVEREF(__pyx_v_mask); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_mask); __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 214, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_n_s_float32) < 0) __PYX_ERR(0, 214, __pyx_L1_error) __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 214, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":207 * @cython.boundscheck(False) * @cython.wraparound(False) * def get_dropout_mask(self, shape, drop): # <<<<<<<<<<<<<< * if drop is None or drop <= 0: * return None */ /* function exit code */ __pyx_L1_error:; 
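/* Error exit for get_dropout_mask: any temporaries still owned are XDECREF'ed,
 * __Pyx_AddTraceback() appends a traceback frame pointing back at the original
 * ops.pyx line, and NULL is returned to signal the exception to the caller.
 * The __pyx_L0 label below is the shared normal-return path. */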
__Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("thinc.neural.ops.Ops.get_dropout_mask", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_coinflips); __Pyx_XDECREF(__pyx_v_mask); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":216 * return self.asarray(mask, dtype='float32') * * def allocate(self, shape, dtype='float32'): # <<<<<<<<<<<<<< * if isinstance(shape, integer_types): * shape = (shape,) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_19allocate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_19allocate = {"allocate", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_19allocate, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_19allocate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_shape = 0; PyObject *__pyx_v_dtype = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("allocate (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_shape,&__pyx_n_s_dtype,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject*)__pyx_n_s_float32)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("allocate", 0, 2, 3, 1); __PYX_ERR(0, 216, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "allocate") < 0)) __PYX_ERR(0, 216, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_shape = values[1]; __pyx_v_dtype = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("allocate", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 216, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.allocate", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_18allocate(__pyx_self, __pyx_v_self, __pyx_v_shape, __pyx_v_dtype); /* 
function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_18allocate(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_shape, PyObject *__pyx_v_dtype) { CYTHON_UNUSED PyObject *__pyx_v_nr_weight = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__25) __Pyx_RefNannySetupContext("allocate", 0); __Pyx_TraceCall("allocate", __pyx_f[0], 216, 0, __PYX_ERR(0, 216, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_shape); /* "thinc/neural/ops.pyx":217 * * def allocate(self, shape, dtype='float32'): * if isinstance(shape, integer_types): # <<<<<<<<<<<<<< * shape = (shape,) * nr_weight = numpy.prod(shape) */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_integer_types); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_IsInstance(__pyx_v_shape, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 217, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "thinc/neural/ops.pyx":218 * def allocate(self, shape, dtype='float32'): * if isinstance(shape, integer_types): * shape = (shape,) # <<<<<<<<<<<<<< * nr_weight = numpy.prod(shape) * return self.xp.zeros(shape, dtype=dtype) */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_shape); __Pyx_DECREF_SET(__pyx_v_shape, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":217 * * def allocate(self, shape, dtype='float32'): * if isinstance(shape, integer_types): # <<<<<<<<<<<<<< * shape = (shape,) * nr_weight = numpy.prod(shape) */ } /* "thinc/neural/ops.pyx":219 * if isinstance(shape, integer_types): * shape = (shape,) * nr_weight = numpy.prod(shape) # <<<<<<<<<<<<<< * return self.xp.zeros(shape, dtype=dtype) * */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_numpy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_prod); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_1 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_4, __pyx_v_shape) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_shape); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_nr_weight = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":220 * shape = (shape,) * nr_weight = numpy.prod(shape) * return self.xp.zeros(shape, dtype=dtype) # <<<<<<<<<<<<<< * * def unzip(self, data): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 220, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 220, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 220, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_shape); __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 220, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 220, __pyx_L1_error) __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 220, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":216 * return self.asarray(mask, dtype='float32') * * def allocate(self, shape, dtype='float32'): # <<<<<<<<<<<<<< * if isinstance(shape, integer_types): * shape = (shape,) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.Ops.allocate", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_nr_weight); __Pyx_XDECREF(__pyx_v_shape); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":222 * return self.xp.zeros(shape, dtype=dtype) * * def unzip(self, data): # <<<<<<<<<<<<<< * X, y = zip(*data) * return self.asarray(X), self.asarray(y) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_21unzip(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_21unzip = {"unzip", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_21unzip, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_21unzip(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("unzip (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; 
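/* Argument-parsing boilerplate for unzip(self, data): the switch falls through
 * from the highest positional-argument count down, copying each positional
 * argument into values[]; anything not supplied positionally is then looked up
 * in the keyword dict by its interned name, with __Pyx_RaiseArgtupleInvalid /
 * __Pyx_ParseOptionalKeywords raising TypeError for missing or unexpected
 * arguments. */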
case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("unzip", 1, 2, 2, 1); __PYX_ERR(0, 222, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "unzip") < 0)) __PYX_ERR(0, 222, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_data = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("unzip", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 222, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.unzip", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_20unzip(__pyx_self, __pyx_v_self, __pyx_v_data); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_20unzip(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data) { PyObject *__pyx_v_X = NULL; PyObject *__pyx_v_y = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *(*__pyx_t_5)(PyObject *); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__26) __Pyx_RefNannySetupContext("unzip", 0); __Pyx_TraceCall("unzip", __pyx_f[0], 222, 0, __PYX_ERR(0, 222, __pyx_L1_error)); /* "thinc/neural/ops.pyx":223 * * def unzip(self, data): * X, y = zip(*data) # <<<<<<<<<<<<<< * return self.asarray(X), self.asarray(y) * */ __pyx_t_1 = __Pyx_PySequence_Tuple(__pyx_v_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_zip, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 223, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_1 = PyList_GET_ITEM(sequence, 0); __pyx_t_3 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_1 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { Py_ssize_t index = -1; 
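/* Unpacking `X, y = zip(*data)`: the branch above is the fast path for an exact
 * tuple/list result (size checked, items fetched directly); this branch falls
 * back to the iterator protocol, pulling exactly two items via tp_iternext and
 * then checking with __Pyx_IternextUnpackEndCheck that the iterator is
 * exhausted, raising the usual too-many / not-enough-values-to-unpack errors
 * otherwise. */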
__pyx_t_4 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = Py_TYPE(__pyx_t_4)->tp_iternext; index = 0; __pyx_t_1 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_1)) goto __pyx_L3_unpacking_failed; __Pyx_GOTREF(__pyx_t_1); index = 1; __pyx_t_3 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_3)) goto __pyx_L3_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); if (__Pyx_IternextUnpackEndCheck(__pyx_t_5(__pyx_t_4), 2) < 0) __PYX_ERR(0, 223, __pyx_L1_error) __pyx_t_5 = NULL; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L4_unpacking_done; __pyx_L3_unpacking_failed:; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 223, __pyx_L1_error) __pyx_L4_unpacking_done:; } __pyx_v_X = __pyx_t_1; __pyx_t_1 = 0; __pyx_v_y = __pyx_t_3; __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":224 * def unzip(self, data): * X, y = zip(*data) * return self.asarray(X), self.asarray(y) # <<<<<<<<<<<<<< * * def asarray(self, data, dtype=None): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_asarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 224, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_2 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_1, __pyx_v_X) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_X); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 224, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_asarray); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 224, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_4, __pyx_v_y) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_y); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 224, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 224, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":222 * return self.xp.zeros(shape, dtype=dtype) * * def unzip(self, data): # <<<<<<<<<<<<<< * X, y = zip(*data) * return self.asarray(X), self.asarray(y) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.Ops.unzip", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_X); __Pyx_XDECREF(__pyx_v_y); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":226 * return self.asarray(X), self.asarray(y) * * def asarray(self, data, dtype=None): # <<<<<<<<<<<<<< * if isinstance(data, self.xp.ndarray): * if dtype is not None: */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_23asarray(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_23asarray = {"asarray", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_23asarray, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_23asarray(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; PyObject *__pyx_v_dtype = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("asarray (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,&__pyx_n_s_dtype,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("asarray", 0, 2, 3, 1); __PYX_ERR(0, 226, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "asarray") < 0)) __PYX_ERR(0, 226, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: 
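/* asarray(self, data, dtype=None): the wrapper pre-fills values[2] with Py_None
 * as the default for `dtype`, then overwrites it with whatever was actually
 * passed.  This branch handles a purely positional call; keyword calls were
 * handled in the branch above. */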
values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_data = values[1]; __pyx_v_dtype = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("asarray", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 226, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.asarray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_22asarray(__pyx_self, __pyx_v_self, __pyx_v_data, __pyx_v_dtype); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_22asarray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data, PyObject *__pyx_v_dtype) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__27) __Pyx_RefNannySetupContext("asarray", 0); __Pyx_TraceCall("asarray", __pyx_f[0], 226, 0, __PYX_ERR(0, 226, __pyx_L1_error)); /* "thinc/neural/ops.pyx":227 * * def asarray(self, data, dtype=None): * if isinstance(data, self.xp.ndarray): # <<<<<<<<<<<<<< * if dtype is not None: * return self.xp.asarray(data, dtype=dtype) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 227, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = (__pyx_t_3 != 0); if (__pyx_t_4) { /* "thinc/neural/ops.pyx":228 * def asarray(self, data, dtype=None): * if isinstance(data, self.xp.ndarray): * if dtype is not None: # <<<<<<<<<<<<<< * return self.xp.asarray(data, dtype=dtype) * else: */ __pyx_t_4 = (__pyx_v_dtype != Py_None); __pyx_t_3 = (__pyx_t_4 != 0); if (__pyx_t_3) { /* "thinc/neural/ops.pyx":229 * if isinstance(data, self.xp.ndarray): * if dtype is not None: * return self.xp.asarray(data, dtype=dtype) # <<<<<<<<<<<<<< * else: * return self.xp.asarray(data) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 229, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_asarray); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 229, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 229, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_data); __Pyx_GIVEREF(__pyx_v_data); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_data); __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 229, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 229, __pyx_L1_error) 
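/* Keyword-call pattern for `self.xp.asarray(data, dtype=dtype)`: because a
 * keyword argument is involved, the fastcall helpers are not used here.  The
 * positional arguments are packed into a fresh tuple, the keywords into a
 * presized dict, and the call goes through __Pyx_PyObject_Call (essentially
 * PyObject_Call).  The same pattern is generated for self.xp.array(data,
 * dtype=dtype) and self.asarray(mask, dtype='float32') elsewhere in this file. */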
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 229, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":228 * def asarray(self, data, dtype=None): * if isinstance(data, self.xp.ndarray): * if dtype is not None: # <<<<<<<<<<<<<< * return self.xp.asarray(data, dtype=dtype) * else: */ } /* "thinc/neural/ops.pyx":231 * return self.xp.asarray(data, dtype=dtype) * else: * return self.xp.asarray(data) # <<<<<<<<<<<<<< * elif hasattr(data, 'numpy'): * # Handles PyTorch Tensor */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 231, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 231, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_6 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_data); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 231, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; } /* "thinc/neural/ops.pyx":227 * * def asarray(self, data, dtype=None): * if isinstance(data, self.xp.ndarray): # <<<<<<<<<<<<<< * if dtype is not None: * return self.xp.asarray(data, dtype=dtype) */ } /* "thinc/neural/ops.pyx":232 * else: * return self.xp.asarray(data) * elif hasattr(data, 'numpy'): # <<<<<<<<<<<<<< * # Handles PyTorch Tensor * return data.numpy() */ __pyx_t_3 = __Pyx_HasAttr(__pyx_v_data, __pyx_n_s_numpy); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 232, __pyx_L1_error) __pyx_t_4 = (__pyx_t_3 != 0); if (__pyx_t_4) { /* "thinc/neural/ops.pyx":234 * elif hasattr(data, 'numpy'): * # Handles PyTorch Tensor * return data.numpy() # <<<<<<<<<<<<<< * elif dtype is not None: * return self.xp.array(data, dtype=dtype) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_6 = (__pyx_t_5) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":232 * else: * return self.xp.asarray(data) * elif hasattr(data, 'numpy'): # <<<<<<<<<<<<<< * # Handles PyTorch Tensor * return data.numpy() */ } /* "thinc/neural/ops.pyx":235 * # Handles PyTorch Tensor * return data.numpy() * elif dtype is not None: # <<<<<<<<<<<<<< * return self.xp.array(data, dtype=dtype) * else: */ __pyx_t_4 = (__pyx_v_dtype != Py_None); __pyx_t_3 = (__pyx_t_4 != 0); if (__pyx_t_3) { /* "thinc/neural/ops.pyx":236 * return data.numpy() * elif dtype is not None: * return self.xp.array(data, dtype=dtype) # <<<<<<<<<<<<<< * else: * return self.xp.array(data) */ __Pyx_XDECREF(__pyx_r); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 236, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 236, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 236, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_v_data); __Pyx_GIVEREF(__pyx_v_data); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_data); __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 236, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 236, __pyx_L1_error) __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 236, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":235 * # Handles PyTorch Tensor * return data.numpy() * elif dtype is not None: # <<<<<<<<<<<<<< * return self.xp.array(data, dtype=dtype) * else: */ } /* "thinc/neural/ops.pyx":238 * return self.xp.array(data, dtype=dtype) * else: * return self.xp.array(data) # <<<<<<<<<<<<<< * * def batch_dot(self, x, y, transpose=False): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_array); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_1 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_5, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_data); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; } /* "thinc/neural/ops.pyx":226 * return self.asarray(X), self.asarray(y) * * def asarray(self, data, dtype=None): # <<<<<<<<<<<<<< * if isinstance(data, self.xp.ndarray): * if dtype is not None: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.Ops.asarray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":240 * return self.xp.array(data) * * def batch_dot(self, x, y, transpose=False): # <<<<<<<<<<<<<< * # TODO: Fix this confusing inversion =/ * if not transpose: */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_25batch_dot(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_25batch_dot = {"batch_dot", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_25batch_dot, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_25batch_dot(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_x = 0; PyObject *__pyx_v_y = 0; PyObject *__pyx_v_transpose = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("batch_dot (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_transpose,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_False)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("batch_dot", 0, 3, 4, 1); __PYX_ERR(0, 240, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("batch_dot", 0, 3, 4, 2); __PYX_ERR(0, 240, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_transpose); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "batch_dot") < 0)) __PYX_ERR(0, 240, __pyx_L3_error) } } else { switch 
(PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_x = values[1]; __pyx_v_y = values[2]; __pyx_v_transpose = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("batch_dot", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 240, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.batch_dot", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_24batch_dot(__pyx_self, __pyx_v_self, __pyx_v_x, __pyx_v_y, __pyx_v_transpose); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_24batch_dot(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_y, PyObject *__pyx_v_transpose) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__28) __Pyx_RefNannySetupContext("batch_dot", 0); __Pyx_TraceCall("batch_dot", __pyx_f[0], 240, 0, __PYX_ERR(0, 240, __pyx_L1_error)); /* "thinc/neural/ops.pyx":242 * def batch_dot(self, x, y, transpose=False): * # TODO: Fix this confusing inversion =/ * if not transpose: # <<<<<<<<<<<<<< * return self.xp.dot(x, y.T) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_transpose); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 242, __pyx_L1_error) __pyx_t_2 = ((!__pyx_t_1) != 0); if (__pyx_t_2) { /* "thinc/neural/ops.pyx":243 * # TODO: Fix this confusing inversion =/ * if not transpose: * return self.xp.dot(x, y.T) # <<<<<<<<<<<<<< * else: * return self.xp.dot(x, y) */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 243, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_dot); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 243, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_y, __pyx_n_s_T); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 243, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_x, __pyx_t_4}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 243, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = 
{__pyx_t_6, __pyx_v_x, __pyx_t_4}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 243, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 243, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_v_x); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 243, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":242 * def batch_dot(self, x, y, transpose=False): * # TODO: Fix this confusing inversion =/ * if not transpose: # <<<<<<<<<<<<<< * return self.xp.dot(x, y.T) * else: */ } /* "thinc/neural/ops.pyx":245 * return self.xp.dot(x, y.T) * else: * return self.xp.dot(x, y) # <<<<<<<<<<<<<< * * def add_batch_outer(self, output, x, y): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_dot); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_8); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_8, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_8)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_v_x, __pyx_v_y}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_8, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 245, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_8)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_v_x, __pyx_v_y}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_8, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 245, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { __pyx_t_4 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_4, 0+__pyx_t_7, __pyx_v_x); __Pyx_INCREF(__pyx_v_y); __Pyx_GIVEREF(__pyx_v_y); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_7, __pyx_v_y); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "thinc/neural/ops.pyx":240 * return 
self.xp.array(data) * * def batch_dot(self, x, y, transpose=False): # <<<<<<<<<<<<<< * # TODO: Fix this confusing inversion =/ * if not transpose: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("thinc.neural.ops.Ops.batch_dot", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":247 * return self.xp.dot(x, y) * * def add_batch_outer(self, output, x, y): # <<<<<<<<<<<<<< * # TODO: Deprecate this * output += self.xp.tensordot(x, y, axes=[[0], [0]]) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_27add_batch_outer(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_27add_batch_outer = {"add_batch_outer", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_27add_batch_outer, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_27add_batch_outer(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_output = 0; PyObject *__pyx_v_x = 0; PyObject *__pyx_v_y = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("add_batch_outer (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_output,&__pyx_n_s_x,&__pyx_n_s_y,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_output)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add_batch_outer", 1, 4, 4, 1); __PYX_ERR(0, 247, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add_batch_outer", 1, 4, 4, 2); __PYX_ERR(0, 247, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add_batch_outer", 1, 4, 4, 3); __PYX_ERR(0, 247, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "add_batch_outer") < 0)) __PYX_ERR(0, 247, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_self = values[0]; __pyx_v_output = 
values[1]; __pyx_v_x = values[2]; __pyx_v_y = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("add_batch_outer", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 247, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.add_batch_outer", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_26add_batch_outer(__pyx_self, __pyx_v_self, __pyx_v_output, __pyx_v_x, __pyx_v_y); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_26add_batch_outer(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_output, PyObject *__pyx_v_x, PyObject *__pyx_v_y) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__29) __Pyx_RefNannySetupContext("add_batch_outer", 0); __Pyx_TraceCall("add_batch_outer", __pyx_f[0], 247, 0, __PYX_ERR(0, 247, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_output); /* "thinc/neural/ops.pyx":249 * def add_batch_outer(self, output, x, y): * # TODO: Deprecate this * output += self.xp.tensordot(x, y, axes=[[0], [0]]) # <<<<<<<<<<<<<< * * def norm(self, x): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_tensordot); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_x); __Pyx_INCREF(__pyx_v_y); __Pyx_GIVEREF(__pyx_v_y); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_y); __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyList_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_int_0); __pyx_t_5 = PyList_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_int_0); __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_4); PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_5); __pyx_t_4 = 0; __pyx_t_5 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_axes, __pyx_t_6) < 0) __PYX_ERR(0, 249, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_output, __pyx_t_6); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_output, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":247 * return self.xp.dot(x, y) * * def add_batch_outer(self, output, x, y): # <<<<<<<<<<<<<< * # TODO: Deprecate this * output += self.xp.tensordot(x, y, axes=[[0], [0]]) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.Ops.add_batch_outer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_output); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":251 * output += self.xp.tensordot(x, y, axes=[[0], [0]]) * * def norm(self, x): # <<<<<<<<<<<<<< * return self.xp.sqrt((x * x).sum()) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_29norm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_29norm = {"norm", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_29norm, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_29norm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_x = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("norm (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("norm", 1, 2, 2, 1); __PYX_ERR(0, 251, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "norm") < 0)) __PYX_ERR(0, 251, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_x = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("norm", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 251, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.norm", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_28norm(__pyx_self, __pyx_v_self, __pyx_v_x); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_5thinc_6neural_3ops_3Ops_28norm(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__30) __Pyx_RefNannySetupContext("norm", 0); __Pyx_TraceCall("norm", __pyx_f[0], 251, 0, __PYX_ERR(0, 251, __pyx_L1_error)); /* "thinc/neural/ops.pyx":252 * * def norm(self, x): * return self.xp.sqrt((x * x).sum()) # <<<<<<<<<<<<<< * * def dot(self, x, y): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = PyNumber_Multiply(__pyx_v_x, __pyx_v_x); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_sum); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_2 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_5); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_5, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":251 * output += self.xp.tensordot(x, y, axes=[[0], [0]]) * * def norm(self, x): # <<<<<<<<<<<<<< * return self.xp.sqrt((x * x).sum()) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.Ops.norm", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":254 * return self.xp.sqrt((x * x).sum()) * * def dot(self, x, y): # <<<<<<<<<<<<<< * # TODO: Deprecate this * return self.xp.dot(x, y) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_31dot(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_31dot = {"dot", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_31dot, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_31dot(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_x = 0; PyObject *__pyx_v_y = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("dot (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,&__pyx_n_s_y,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dot", 1, 3, 3, 1); __PYX_ERR(0, 254, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dot", 1, 3, 3, 2); __PYX_ERR(0, 254, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "dot") < 0)) __PYX_ERR(0, 254, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_x = values[1]; __pyx_v_y = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("dot", 1, 3, 3, 
PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 254, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.dot", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_30dot(__pyx_self, __pyx_v_self, __pyx_v_x, __pyx_v_y); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_30dot(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_y) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__31) __Pyx_RefNannySetupContext("dot", 0); __Pyx_TraceCall("dot", __pyx_f[0], 254, 0, __PYX_ERR(0, 254, __pyx_L1_error)); /* "thinc/neural/ops.pyx":256 * def dot(self, x, y): * # TODO: Deprecate this * return self.xp.dot(x, y) # <<<<<<<<<<<<<< * * def affine(self, weights, bias, signal): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_dot); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; __pyx_t_4 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_4 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_x, __pyx_v_y}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 2+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_x, __pyx_v_y}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 2+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; } __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, __pyx_v_x); __Pyx_INCREF(__pyx_v_y); __Pyx_GIVEREF(__pyx_v_y); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, __pyx_v_y); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":254 * return self.xp.sqrt((x * x).sum()) * * def dot(self, x, y): # <<<<<<<<<<<<<< * # TODO: Deprecate this * return self.xp.dot(x, y) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.Ops.dot", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":258 * return self.xp.dot(x, y) * * def affine(self, weights, bias, signal): # <<<<<<<<<<<<<< * return self.gemm(signal, weights, trans2=True) + bias * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_33affine(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_33affine = {"affine", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_33affine, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_33affine(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_weights = 0; PyObject *__pyx_v_bias = 0; PyObject *__pyx_v_signal = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("affine (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_weights,&__pyx_n_s_bias,&__pyx_n_s_signal,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("affine", 1, 4, 4, 1); __PYX_ERR(0, 258, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bias)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("affine", 1, 4, 4, 2); __PYX_ERR(0, 258, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_signal)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("affine", 1, 4, 4, 3); __PYX_ERR(0, 258, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "affine") < 0)) __PYX_ERR(0, 258, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_self = values[0]; __pyx_v_weights = values[1]; __pyx_v_bias = values[2]; __pyx_v_signal = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("affine", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 258, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.affine", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_32affine(__pyx_self, __pyx_v_self, __pyx_v_weights, __pyx_v_bias, __pyx_v_signal); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_32affine(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_weights, PyObject *__pyx_v_bias, PyObject *__pyx_v_signal) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__32) __Pyx_RefNannySetupContext("affine", 0); __Pyx_TraceCall("affine", __pyx_f[0], 258, 0, __PYX_ERR(0, 258, __pyx_L1_error)); /* "thinc/neural/ops.pyx":259 * * def affine(self, weights, bias, signal): * return self.gemm(signal, weights, trans2=True) + bias # <<<<<<<<<<<<<< * * def add_sum(self, out, to_sum): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_gemm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_signal); __Pyx_GIVEREF(__pyx_v_signal); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_signal); __Pyx_INCREF(__pyx_v_weights); __Pyx_GIVEREF(__pyx_v_weights); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_weights); __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_trans2, Py_True) < 0) __PYX_ERR(0, 259, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Add(__pyx_t_4, __pyx_v_bias); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":258 * return self.xp.dot(x, y) * * def affine(self, weights, bias, signal): # <<<<<<<<<<<<<< * return self.gemm(signal, weights, trans2=True) + bias * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.Ops.affine", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":261 * return self.gemm(signal, weights, trans2=True) + bias * * def add_sum(self, out, to_sum): # <<<<<<<<<<<<<< * out += to_sum.sum(axis=0) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_35add_sum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_35add_sum = {"add_sum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_35add_sum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_35add_sum(PyObject *__pyx_self, PyObject *__pyx_args, 
PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_out = 0; PyObject *__pyx_v_to_sum = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("add_sum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_out,&__pyx_n_s_to_sum,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add_sum", 1, 3, 3, 1); __PYX_ERR(0, 261, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_to_sum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add_sum", 1, 3, 3, 2); __PYX_ERR(0, 261, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "add_sum") < 0)) __PYX_ERR(0, 261, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_out = values[1]; __pyx_v_to_sum = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("add_sum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 261, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.add_sum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_34add_sum(__pyx_self, __pyx_v_self, __pyx_v_out, __pyx_v_to_sum); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_34add_sum(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_out, PyObject *__pyx_v_to_sum) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__33) __Pyx_RefNannySetupContext("add_sum", 0); __Pyx_TraceCall("add_sum", __pyx_f[0], 261, 0, __PYX_ERR(0, 261, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_out); /* "thinc/neural/ops.pyx":262 * * def add_sum(self, out, to_sum): * out += to_sum.sum(axis=0) # <<<<<<<<<<<<<< * * def argmax(self, x, axis=-1): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_to_sum, __pyx_n_s_sum); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 262, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 262, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if 
(PyDict_SetItem(__pyx_t_2, __pyx_n_s_axis, __pyx_int_0) < 0) __PYX_ERR(0, 262, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 262, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_out, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 262, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_out, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":261 * return self.gemm(signal, weights, trans2=True) + bias * * def add_sum(self, out, to_sum): # <<<<<<<<<<<<<< * out += to_sum.sum(axis=0) * */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("thinc.neural.ops.Ops.add_sum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":264 * out += to_sum.sum(axis=0) * * def argmax(self, x, axis=-1): # <<<<<<<<<<<<<< * return self.xp.argmax(x, axis=axis) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_37argmax(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_37argmax = {"argmax", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_37argmax, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_37argmax(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_x = 0; PyObject *__pyx_v_axis = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("argmax (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,&__pyx_n_s_axis,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject *)__pyx_int_neg_1)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("argmax", 0, 2, 3, 1); __PYX_ERR(0, 264, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "argmax") < 0)) __PYX_ERR(0, 264, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_x = values[1]; __pyx_v_axis = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("argmax", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 264, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.argmax", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_36argmax(__pyx_self, __pyx_v_self, __pyx_v_x, __pyx_v_axis); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_36argmax(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_axis) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__34) __Pyx_RefNannySetupContext("argmax", 0); __Pyx_TraceCall("argmax", __pyx_f[0], 264, 0, __PYX_ERR(0, 264, __pyx_L1_error)); /* "thinc/neural/ops.pyx":265 * * def argmax(self, x, axis=-1): * return self.xp.argmax(x, axis=axis) # <<<<<<<<<<<<<< * * def sigmoid(self, X): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 265, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_argmax); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 265, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 265, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_x); __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 265, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_axis, __pyx_v_axis) < 0) __PYX_ERR(0, 265, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 265, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":264 * out += to_sum.sum(axis=0) * * def argmax(self, x, axis=-1): # <<<<<<<<<<<<<< * return self.xp.argmax(x, axis=axis) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.Ops.argmax", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":267 * return self.xp.argmax(x, axis=axis) * * def sigmoid(self, X): # <<<<<<<<<<<<<< * return 1./(1. 
+ self.xp.exp(-X)) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_39sigmoid(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_39sigmoid = {"sigmoid", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_39sigmoid, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_39sigmoid(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("sigmoid (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("sigmoid", 1, 2, 2, 1); __PYX_ERR(0, 267, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "sigmoid") < 0)) __PYX_ERR(0, 267, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_X = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("sigmoid", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 267, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.sigmoid", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_38sigmoid(__pyx_self, __pyx_v_self, __pyx_v_X); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_38sigmoid(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_X) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__35) __Pyx_RefNannySetupContext("sigmoid", 0); __Pyx_TraceCall("sigmoid", __pyx_f[0], 267, 0, __PYX_ERR(0, 267, __pyx_L1_error)); /* "thinc/neural/ops.pyx":268 * * def sigmoid(self, X): * return 1./(1. 
+ self.xp.exp(-X)) # <<<<<<<<<<<<<< * * def dsigmoid(self, y): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 268, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_exp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 268, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Negative(__pyx_v_X); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 268, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 268, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyFloat_AddCObj(__pyx_float_1_, __pyx_t_1, 1., 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 268, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyFloat_DivideCObj(__pyx_float_1_, __pyx_t_3, 1., 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 268, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":267 * return self.xp.argmax(x, axis=axis) * * def sigmoid(self, X): # <<<<<<<<<<<<<< * return 1./(1. + self.xp.exp(-X)) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.Ops.sigmoid", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":270 * return 1./(1. 
+ self.xp.exp(-X)) * * def dsigmoid(self, y): # <<<<<<<<<<<<<< * return y*(1-y) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_41dsigmoid(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_41dsigmoid = {"dsigmoid", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_41dsigmoid, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_41dsigmoid(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_y = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("dsigmoid (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_y,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dsigmoid", 1, 2, 2, 1); __PYX_ERR(0, 270, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "dsigmoid") < 0)) __PYX_ERR(0, 270, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_y = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("dsigmoid", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 270, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.dsigmoid", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_40dsigmoid(__pyx_self, __pyx_v_self, __pyx_v_y); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_40dsigmoid(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_y) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__36) __Pyx_RefNannySetupContext("dsigmoid", 0); __Pyx_TraceCall("dsigmoid", __pyx_f[0], 270, 0, __PYX_ERR(0, 270, __pyx_L1_error)); /* "thinc/neural/ops.pyx":271 * * def dsigmoid(self, y): * return y*(1-y) # <<<<<<<<<<<<<< * * def dtanh(self, y): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_SubtractCObj(__pyx_int_1, __pyx_v_y, 1, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 271, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_Multiply(__pyx_v_y, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 271, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":270 * return 1./(1. + self.xp.exp(-X)) * * def dsigmoid(self, y): # <<<<<<<<<<<<<< * return y*(1-y) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("thinc.neural.ops.Ops.dsigmoid", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":273 * return y*(1-y) * * def dtanh(self, y): # <<<<<<<<<<<<<< * return 1-y**2 * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_43dtanh(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_43dtanh = {"dtanh", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_43dtanh, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_43dtanh(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_y = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("dtanh (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_y,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dtanh", 1, 2, 2, 1); __PYX_ERR(0, 273, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "dtanh") < 0)) __PYX_ERR(0, 273, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_y = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("dtanh", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 273, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.dtanh", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_42dtanh(__pyx_self, __pyx_v_self, __pyx_v_y); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_42dtanh(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_y) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int 
__pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__37) __Pyx_RefNannySetupContext("dtanh", 0); __Pyx_TraceCall("dtanh", __pyx_f[0], 273, 0, __PYX_ERR(0, 273, __pyx_L1_error)); /* "thinc/neural/ops.pyx":274 * * def dtanh(self, y): * return 1-y**2 # <<<<<<<<<<<<<< * * def softmax(self, x, inplace=False, axis=-1): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyNumber_Power(__pyx_v_y, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_SubtractCObj(__pyx_int_1, __pyx_t_1, 1, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":273 * return y*(1-y) * * def dtanh(self, y): # <<<<<<<<<<<<<< * return 1-y**2 * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("thinc.neural.ops.Ops.dtanh", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":276 * return 1-y**2 * * def softmax(self, x, inplace=False, axis=-1): # <<<<<<<<<<<<<< * shape = x.shape * maxes = self.xp.max(x, axis=axis, keepdims=True) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_45softmax(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_45softmax = {"softmax", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_45softmax, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_45softmax(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_x = 0; PyObject *__pyx_v_inplace = 0; PyObject *__pyx_v_axis = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("softmax (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,&__pyx_n_s_inplace,&__pyx_n_s_axis,0}; PyObject* values[4] = {0,0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_False)); values[3] = ((PyObject *)((PyObject *)__pyx_int_neg_1)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("softmax", 0, 2, 4, 1); __PYX_ERR(0, 276, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis); if 
(value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "softmax") < 0)) __PYX_ERR(0, 276, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_x = values[1]; __pyx_v_inplace = values[2]; __pyx_v_axis = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("softmax", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 276, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.softmax", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_44softmax(__pyx_self, __pyx_v_self, __pyx_v_x, __pyx_v_inplace, __pyx_v_axis); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_44softmax(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_inplace, PyObject *__pyx_v_axis) { CYTHON_UNUSED PyObject *__pyx_v_shape = NULL; PyObject *__pyx_v_maxes = NULL; PyObject *__pyx_v_shifted = NULL; PyObject *__pyx_v_new_x = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__38) __Pyx_RefNannySetupContext("softmax", 0); __Pyx_TraceCall("softmax", __pyx_f[0], 276, 0, __PYX_ERR(0, 276, __pyx_L1_error)); /* "thinc/neural/ops.pyx":277 * * def softmax(self, x, inplace=False, axis=-1): * shape = x.shape # <<<<<<<<<<<<<< * maxes = self.xp.max(x, axis=axis, keepdims=True) * shifted = x - maxes */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 277, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_shape = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":278 * def softmax(self, x, inplace=False, axis=-1): * shape = x.shape * maxes = self.xp.max(x, axis=axis, keepdims=True) # <<<<<<<<<<<<<< * shifted = x - maxes * new_x = self.xp.exp(shifted) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_max); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_x); __pyx_t_3 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_axis, __pyx_v_axis) < 0) __PYX_ERR(0, 278, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_keepdims, Py_True) < 0) __PYX_ERR(0, 278, __pyx_L1_error) __pyx_t_4 = 
__Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_maxes = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":279 * shape = x.shape * maxes = self.xp.max(x, axis=axis, keepdims=True) * shifted = x - maxes # <<<<<<<<<<<<<< * new_x = self.xp.exp(shifted) * new_x /= new_x.sum(axis=axis, keepdims=True) */ __pyx_t_4 = PyNumber_Subtract(__pyx_v_x, __pyx_v_maxes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 279, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_shifted = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":280 * maxes = self.xp.max(x, axis=axis, keepdims=True) * shifted = x - maxes * new_x = self.xp.exp(shifted) # <<<<<<<<<<<<<< * new_x /= new_x.sum(axis=axis, keepdims=True) * if inplace: */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_exp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_4 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_3, __pyx_v_shifted) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_shifted); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_new_x = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":281 * shifted = x - maxes * new_x = self.xp.exp(shifted) * new_x /= new_x.sum(axis=axis, keepdims=True) # <<<<<<<<<<<<<< * if inplace: * copy_array(x, new_x) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_new_x, __pyx_n_s_sum); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_axis, __pyx_v_axis) < 0) __PYX_ERR(0, 281, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_keepdims, Py_True) < 0) __PYX_ERR(0, 281, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyNumber_InPlaceDivide(__pyx_v_new_x, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_new_x, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":282 * new_x = self.xp.exp(shifted) * new_x /= new_x.sum(axis=axis, keepdims=True) * if inplace: # <<<<<<<<<<<<<< * copy_array(x, new_x) * return x */ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 282, __pyx_L1_error) if (__pyx_t_5) { /* "thinc/neural/ops.pyx":283 * new_x /= new_x.sum(axis=axis, keepdims=True) * if inplace: * copy_array(x, new_x) # <<<<<<<<<<<<<< * return x * else: */ 
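/* Editorial note (not generated by Cython): the Python-level logic compiled in this
   block is the numerically stable softmax shown in the pyx annotations above: subtract
   the per-axis max, exponentiate, then normalise. A minimal NumPy-only sketch of the
   same computation, assuming `self.xp` is numpy; `softmax_sketch` is a hypothetical
   standalone helper for illustration, not part of this module:

       import numpy as np

       def softmax_sketch(x, axis=-1):
           # subtract the per-slice max so exp() cannot overflow
           shifted = x - np.max(x, axis=axis, keepdims=True)
           e = np.exp(shifted)
           # normalise so each slice along `axis` sums to 1
           return e / e.sum(axis=axis, keepdims=True)
*/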
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_copy_array); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; __pyx_t_6 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_6 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_x, __pyx_v_new_x}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 283, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_x, __pyx_v_new_x}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 283, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_2 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_2, 0+__pyx_t_6, __pyx_v_x); __Pyx_INCREF(__pyx_v_new_x); __Pyx_GIVEREF(__pyx_v_new_x); PyTuple_SET_ITEM(__pyx_t_2, 1+__pyx_t_6, __pyx_v_new_x); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":284 * if inplace: * copy_array(x, new_x) * return x # <<<<<<<<<<<<<< * else: * return new_x */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_x); __pyx_r = __pyx_v_x; goto __pyx_L0; /* "thinc/neural/ops.pyx":282 * new_x = self.xp.exp(shifted) * new_x /= new_x.sum(axis=axis, keepdims=True) * if inplace: # <<<<<<<<<<<<<< * copy_array(x, new_x) * return x */ } /* "thinc/neural/ops.pyx":286 * return x * else: * return new_x # <<<<<<<<<<<<<< * * def softmax_sequences(self, Xs, lengths, inplace=False, axis=-1): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_new_x); __pyx_r = __pyx_v_new_x; goto __pyx_L0; } /* "thinc/neural/ops.pyx":276 * return 1-y**2 * * def softmax(self, x, inplace=False, axis=-1): # <<<<<<<<<<<<<< * shape = x.shape * maxes = self.xp.max(x, axis=axis, keepdims=True) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.Ops.softmax", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_shape); __Pyx_XDECREF(__pyx_v_maxes); __Pyx_XDECREF(__pyx_v_shifted); __Pyx_XDECREF(__pyx_v_new_x); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":288 * return new_x * * def softmax_sequences(self, Xs, lengths, inplace=False, axis=-1): # <<<<<<<<<<<<<< * if Xs.ndim >= 3: * raise NotImplementedError( */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_47softmax_sequences(PyObject *__pyx_self, PyObject *__pyx_args, 
PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_47softmax_sequences = {"softmax_sequences", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_47softmax_sequences, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_47softmax_sequences(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_Xs = 0; PyObject *__pyx_v_lengths = 0; PyObject *__pyx_v_inplace = 0; CYTHON_UNUSED PyObject *__pyx_v_axis = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("softmax_sequences (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_Xs,&__pyx_n_s_lengths,&__pyx_n_s_inplace,&__pyx_n_s_axis,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_False)); values[4] = ((PyObject *)((PyObject *)__pyx_int_neg_1)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Xs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("softmax_sequences", 0, 3, 5, 1); __PYX_ERR(0, 288, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("softmax_sequences", 0, 3, 5, 2); __PYX_ERR(0, 288, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "softmax_sequences") < 0)) __PYX_ERR(0, 288, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_Xs = values[1]; __pyx_v_lengths = values[2]; __pyx_v_inplace = values[3]; __pyx_v_axis = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("softmax_sequences", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 288, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.softmax_sequences", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_46softmax_sequences(__pyx_self, __pyx_v_self, __pyx_v_Xs, __pyx_v_lengths, __pyx_v_inplace, __pyx_v_axis); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_46softmax_sequences(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_Xs, PyObject *__pyx_v_lengths, PyObject *__pyx_v_inplace, CYTHON_UNUSED PyObject *__pyx_v_axis) { PyObject *__pyx_v_new_x = NULL; PyObject *__pyx_v_summed = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__39) __Pyx_RefNannySetupContext("softmax_sequences", 0); __Pyx_TraceCall("softmax_sequences", __pyx_f[0], 288, 0, __PYX_ERR(0, 288, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_Xs); /* "thinc/neural/ops.pyx":289 * * def softmax_sequences(self, Xs, lengths, inplace=False, axis=-1): * if Xs.ndim >= 3: # <<<<<<<<<<<<<< * raise NotImplementedError( * "Softmax currently only supports 2d. ndim=%d" % Xs.ndim) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_Xs, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 289, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_int_3, Py_GE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 289, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 289, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(__pyx_t_3)) { /* "thinc/neural/ops.pyx":291 * if Xs.ndim >= 3: * raise NotImplementedError( * "Softmax currently only supports 2d. ndim=%d" % Xs.ndim) # <<<<<<<<<<<<<< * # This loses almost no fidelity, and helps the numerical stability. * Xs = self.xp.clip(Xs, -20., 20.) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_Xs, __pyx_n_s_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Softmax_currently_only_supports, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":290 * def softmax_sequences(self, Xs, lengths, inplace=False, axis=-1): * if Xs.ndim >= 3: * raise NotImplementedError( # <<<<<<<<<<<<<< * "Softmax currently only supports 2d. ndim=%d" % Xs.ndim) * # This loses almost no fidelity, and helps the numerical stability. */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_NotImplementedError, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 290, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 290, __pyx_L1_error) /* "thinc/neural/ops.pyx":289 * * def softmax_sequences(self, Xs, lengths, inplace=False, axis=-1): * if Xs.ndim >= 3: # <<<<<<<<<<<<<< * raise NotImplementedError( * "Softmax currently only supports 2d. ndim=%d" % Xs.ndim) */ } /* "thinc/neural/ops.pyx":293 * "Softmax currently only supports 2d. 
ndim=%d" % Xs.ndim) * # This loses almost no fidelity, and helps the numerical stability. * Xs = self.xp.clip(Xs, -20., 20.) # <<<<<<<<<<<<<< * new_x = self.xp.exp(Xs) * summed = self.backprop_sum_pool(self.sum_pool(new_x, lengths), lengths) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 293, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_clip); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 293, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; __pyx_t_5 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); __pyx_t_5 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[4] = {__pyx_t_1, __pyx_v_Xs, __pyx_float_neg_20_, __pyx_float_20_}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_5, 3+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 293, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[4] = {__pyx_t_1, __pyx_v_Xs, __pyx_float_neg_20_, __pyx_float_20_}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_5, 3+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 293, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { __pyx_t_6 = PyTuple_New(3+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 293, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_1) { __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); __pyx_t_1 = NULL; } __Pyx_INCREF(__pyx_v_Xs); __Pyx_GIVEREF(__pyx_v_Xs); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, __pyx_v_Xs); __Pyx_INCREF(__pyx_float_neg_20_); __Pyx_GIVEREF(__pyx_float_neg_20_); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_float_neg_20_); __Pyx_INCREF(__pyx_float_20_); __Pyx_GIVEREF(__pyx_float_20_); PyTuple_SET_ITEM(__pyx_t_6, 2+__pyx_t_5, __pyx_float_20_); __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 293, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF_SET(__pyx_v_Xs, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":294 * # This loses almost no fidelity, and helps the numerical stability. * Xs = self.xp.clip(Xs, -20., 20.) * new_x = self.xp.exp(Xs) # <<<<<<<<<<<<<< * summed = self.backprop_sum_pool(self.sum_pool(new_x, lengths), lengths) * new_x /= summed */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_exp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_2 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_4, __pyx_v_Xs) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_Xs); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_new_x = __pyx_t_2; __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":295 * Xs = self.xp.clip(Xs, -20., 20.) * new_x = self.xp.exp(Xs) * summed = self.backprop_sum_pool(self.sum_pool(new_x, lengths), lengths) # <<<<<<<<<<<<<< * new_x /= summed * if inplace: */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_backprop_sum_pool); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_sum_pool); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = NULL; __pyx_t_5 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); __pyx_t_5 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_v_new_x, __pyx_v_lengths}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 295, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_v_new_x, __pyx_v_lengths}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 295, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_INCREF(__pyx_v_new_x); __Pyx_GIVEREF(__pyx_v_new_x); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_5, __pyx_v_new_x); __Pyx_INCREF(__pyx_v_lengths); __Pyx_GIVEREF(__pyx_v_lengths); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_5, __pyx_v_lengths); __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; __pyx_t_5 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_5 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_1, __pyx_t_4, __pyx_v_lengths}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 295, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_1, __pyx_t_4, __pyx_v_lengths}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_5, 
2+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 295, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_1) { __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_1); __pyx_t_1 = NULL; } __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_5, __pyx_t_4); __Pyx_INCREF(__pyx_v_lengths); __Pyx_GIVEREF(__pyx_v_lengths); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_5, __pyx_v_lengths); __pyx_t_4 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_summed = __pyx_t_2; __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":296 * new_x = self.xp.exp(Xs) * summed = self.backprop_sum_pool(self.sum_pool(new_x, lengths), lengths) * new_x /= summed # <<<<<<<<<<<<<< * if inplace: * copy_array(Xs, new_x) */ __pyx_t_2 = __Pyx_PyNumber_InPlaceDivide(__pyx_v_new_x, __pyx_v_summed); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 296, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF_SET(__pyx_v_new_x, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":297 * summed = self.backprop_sum_pool(self.sum_pool(new_x, lengths), lengths) * new_x /= summed * if inplace: # <<<<<<<<<<<<<< * copy_array(Xs, new_x) * return Xs */ __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 297, __pyx_L1_error) if (__pyx_t_3) { /* "thinc/neural/ops.pyx":298 * new_x /= summed * if inplace: * copy_array(Xs, new_x) # <<<<<<<<<<<<<< * return Xs * else: */ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_copy_array); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 298, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; __pyx_t_5 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_5 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_8, __pyx_v_Xs, __pyx_v_new_x}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 298, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_8, __pyx_v_Xs, __pyx_v_new_x}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 298, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { __pyx_t_4 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 298, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (__pyx_t_8) { __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_8); __pyx_t_8 = NULL; } __Pyx_INCREF(__pyx_v_Xs); __Pyx_GIVEREF(__pyx_v_Xs); PyTuple_SET_ITEM(__pyx_t_4, 0+__pyx_t_5, __pyx_v_Xs); __Pyx_INCREF(__pyx_v_new_x); __Pyx_GIVEREF(__pyx_v_new_x); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_5, __pyx_v_new_x); __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 298, __pyx_L1_error) 
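/* Editorial note (not generated by Cython): softmax_sequences, compiled here, normalises
   a ragged batch of sequences independently; the sum_pool/backprop_sum_pool pair sums
   exp(Xs) over the rows of each sequence and broadcasts that sum back. A rough NumPy
   sketch of the same effect, with an explicit loop standing in for the pooling helpers
   (illustrative only, assuming `lengths` holds the row count of each sequence):

       import numpy as np

       def softmax_sequences_sketch(Xs, lengths):
           # Xs: 2d array whose rows are several sequences concatenated together
           Xs = np.clip(Xs, -20.0, 20.0)        # bound inputs for numerical stability
           new_x = np.exp(Xs)
           out, start = [], 0
           for n in lengths:
               seq = new_x[start:start + n]
               # normalise over the rows of this sequence, column by column
               out.append(seq / seq.sum(axis=0, keepdims=True))
               start += n
           return np.concatenate(out)
*/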
__Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":299 * if inplace: * copy_array(Xs, new_x) * return Xs # <<<<<<<<<<<<<< * else: * return new_x */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_Xs); __pyx_r = __pyx_v_Xs; goto __pyx_L0; /* "thinc/neural/ops.pyx":297 * summed = self.backprop_sum_pool(self.sum_pool(new_x, lengths), lengths) * new_x /= summed * if inplace: # <<<<<<<<<<<<<< * copy_array(Xs, new_x) * return Xs */ } /* "thinc/neural/ops.pyx":301 * return Xs * else: * return new_x # <<<<<<<<<<<<<< * * def backprop_softmax(self, Y, dY, axis=-1): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_new_x); __pyx_r = __pyx_v_new_x; goto __pyx_L0; } /* "thinc/neural/ops.pyx":288 * return new_x * * def softmax_sequences(self, Xs, lengths, inplace=False, axis=-1): # <<<<<<<<<<<<<< * if Xs.ndim >= 3: * raise NotImplementedError( */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("thinc.neural.ops.Ops.softmax_sequences", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_new_x); __Pyx_XDECREF(__pyx_v_summed); __Pyx_XDECREF(__pyx_v_Xs); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":303 * return new_x * * def backprop_softmax(self, Y, dY, axis=-1): # <<<<<<<<<<<<<< * dX = Y * dY * dX -= Y * dX.sum(axis=axis, keepdims=True) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_49backprop_softmax(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_49backprop_softmax = {"backprop_softmax", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_49backprop_softmax, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_49backprop_softmax(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_Y = 0; PyObject *__pyx_v_dY = 0; PyObject *__pyx_v_axis = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_softmax (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_Y,&__pyx_n_s_dY,&__pyx_n_s_axis,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)__pyx_int_neg_1)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_softmax", 0, 
3, 4, 1); __PYX_ERR(0, 303, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_softmax", 0, 3, 4, 2); __PYX_ERR(0, 303, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_softmax") < 0)) __PYX_ERR(0, 303, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_Y = values[1]; __pyx_v_dY = values[2]; __pyx_v_axis = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_softmax", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 303, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_softmax", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_48backprop_softmax(__pyx_self, __pyx_v_self, __pyx_v_Y, __pyx_v_dY, __pyx_v_axis); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_48backprop_softmax(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_Y, PyObject *__pyx_v_dY, PyObject *__pyx_v_axis) { PyObject *__pyx_v_dX = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__40) __Pyx_RefNannySetupContext("backprop_softmax", 0); __Pyx_TraceCall("backprop_softmax", __pyx_f[0], 303, 0, __PYX_ERR(0, 303, __pyx_L1_error)); /* "thinc/neural/ops.pyx":304 * * def backprop_softmax(self, Y, dY, axis=-1): * dX = Y * dY # <<<<<<<<<<<<<< * dX -= Y * dX.sum(axis=axis, keepdims=True) * return dX */ __pyx_t_1 = PyNumber_Multiply(__pyx_v_Y, __pyx_v_dY); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 304, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_dX = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":305 * def backprop_softmax(self, Y, dY, axis=-1): * dX = Y * dY * dX -= Y * dX.sum(axis=axis, keepdims=True) # <<<<<<<<<<<<<< * return dX * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dX, __pyx_n_s_sum); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 305, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 305, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_axis, __pyx_v_axis) < 0) __PYX_ERR(0, 305, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_keepdims, Py_True) < 0) __PYX_ERR(0, 305, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 305, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = 
PyNumber_Multiply(__pyx_v_Y, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 305, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_InPlaceSubtract(__pyx_v_dX, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 305, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_dX, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":306 * dX = Y * dY * dX -= Y * dX.sum(axis=axis, keepdims=True) * return dX # <<<<<<<<<<<<<< * * def backprop_softmax_sequences(self, dy, y, lengths): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_dX); __pyx_r = __pyx_v_dX; goto __pyx_L0; /* "thinc/neural/ops.pyx":303 * return new_x * * def backprop_softmax(self, Y, dY, axis=-1): # <<<<<<<<<<<<<< * dX = Y * dY * dX -= Y * dX.sum(axis=axis, keepdims=True) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_softmax", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dX); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":308 * return dX * * def backprop_softmax_sequences(self, dy, y, lengths): # <<<<<<<<<<<<<< * dx = y * dy * sumdx = self.backprop_sum_pool(self.sum_pool(dx, lengths), lengths) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_51backprop_softmax_sequences(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_51backprop_softmax_sequences = {"backprop_softmax_sequences", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_51backprop_softmax_sequences, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_51backprop_softmax_sequences(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_dy = 0; PyObject *__pyx_v_y = 0; PyObject *__pyx_v_lengths = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_softmax_sequences (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_dy,&__pyx_n_s_y,&__pyx_n_s_lengths,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dy)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_softmax_sequences", 1, 4, 4, 1); __PYX_ERR(0, 308, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { 
__Pyx_RaiseArgtupleInvalid("backprop_softmax_sequences", 1, 4, 4, 2); __PYX_ERR(0, 308, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_softmax_sequences", 1, 4, 4, 3); __PYX_ERR(0, 308, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_softmax_sequences") < 0)) __PYX_ERR(0, 308, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_self = values[0]; __pyx_v_dy = values[1]; __pyx_v_y = values[2]; __pyx_v_lengths = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_softmax_sequences", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 308, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_softmax_sequences", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_50backprop_softmax_sequences(__pyx_self, __pyx_v_self, __pyx_v_dy, __pyx_v_y, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_50backprop_softmax_sequences(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_dy, PyObject *__pyx_v_y, PyObject *__pyx_v_lengths) { PyObject *__pyx_v_dx = NULL; PyObject *__pyx_v_sumdx = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__41) __Pyx_RefNannySetupContext("backprop_softmax_sequences", 0); __Pyx_TraceCall("backprop_softmax_sequences", __pyx_f[0], 308, 0, __PYX_ERR(0, 308, __pyx_L1_error)); /* "thinc/neural/ops.pyx":309 * * def backprop_softmax_sequences(self, dy, y, lengths): * dx = y * dy # <<<<<<<<<<<<<< * sumdx = self.backprop_sum_pool(self.sum_pool(dx, lengths), lengths) * dx -= y * sumdx */ __pyx_t_1 = PyNumber_Multiply(__pyx_v_y, __pyx_v_dy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_dx = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":310 * def backprop_softmax_sequences(self, dy, y, lengths): * dx = y * dy * sumdx = self.backprop_sum_pool(self.sum_pool(dx, lengths), lengths) # <<<<<<<<<<<<<< * dx -= y * sumdx * return dx */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_backprop_sum_pool); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_sum_pool); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; __pyx_t_6 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); 
__Pyx_DECREF_SET(__pyx_t_4, function); __pyx_t_6 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_v_dx, __pyx_v_lengths}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_v_dx, __pyx_v_lengths}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_v_dx); __Pyx_GIVEREF(__pyx_v_dx); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_v_dx); __Pyx_INCREF(__pyx_v_lengths); __Pyx_GIVEREF(__pyx_v_lengths); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_v_lengths); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; __pyx_t_6 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_6 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_3, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_3, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_3); __Pyx_INCREF(__pyx_v_lengths); __Pyx_GIVEREF(__pyx_v_lengths); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_v_lengths); __pyx_t_3 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_sumdx = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":311 * dx = y * dy * sumdx = self.backprop_sum_pool(self.sum_pool(dx, lengths), lengths) * dx -= y * sumdx # <<<<<<<<<<<<<< * return dx * */ __pyx_t_1 = PyNumber_Multiply(__pyx_v_y, __pyx_v_sumdx); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_InPlaceSubtract(__pyx_v_dx, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_dx, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":312 * sumdx = self.backprop_sum_pool(self.sum_pool(dx, lengths), lengths) * dx -= y * sumdx * return dx # <<<<<<<<<<<<<< * * def expand_dims(self, a, axis=-1): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_dx); __pyx_r = __pyx_v_dx; goto __pyx_L0; /* "thinc/neural/ops.pyx":308 * return dX * * def backprop_softmax_sequences(self, dy, y, lengths): # <<<<<<<<<<<<<< * dx = y * dy * sumdx = self.backprop_sum_pool(self.sum_pool(dx, lengths), lengths) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_softmax_sequences", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dx); __Pyx_XDECREF(__pyx_v_sumdx); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":314 * return dx * * def expand_dims(self, a, axis=-1): # <<<<<<<<<<<<<< * return self.xp.expand_dims(a, axis=axis) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_53expand_dims(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_53expand_dims = {"expand_dims", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_53expand_dims, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_53expand_dims(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_a = 0; PyObject *__pyx_v_axis = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("expand_dims (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_a,&__pyx_n_s_axis,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject *)__pyx_int_neg_1)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("expand_dims", 0, 2, 3, 1); __PYX_ERR(0, 314, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "expand_dims") < 0)) __PYX_ERR(0, 314, 
__pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_a = values[1]; __pyx_v_axis = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("expand_dims", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 314, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.expand_dims", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_52expand_dims(__pyx_self, __pyx_v_self, __pyx_v_a, __pyx_v_axis); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_52expand_dims(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_a, PyObject *__pyx_v_axis) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__42) __Pyx_RefNannySetupContext("expand_dims", 0); __Pyx_TraceCall("expand_dims", __pyx_f[0], 314, 0, __PYX_ERR(0, 314, __pyx_L1_error)); /* "thinc/neural/ops.pyx":315 * * def expand_dims(self, a, axis=-1): * return self.xp.expand_dims(a, axis=axis) # <<<<<<<<<<<<<< * * def clip_low(self, x, value, inplace=False): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_expand_dims); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_a); __Pyx_GIVEREF(__pyx_v_a); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_a); __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_axis, __pyx_v_axis) < 0) __PYX_ERR(0, 315, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":314 * return dx * * def expand_dims(self, a, axis=-1): # <<<<<<<<<<<<<< * return self.xp.expand_dims(a, axis=axis) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.Ops.expand_dims", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":317 * return self.xp.expand_dims(a, axis=axis) * * def clip_low(self, x, value, inplace=False): # <<<<<<<<<<<<<< * if inplace: * return 
self.xp.maximum(x, value, out=x) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_55clip_low(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_55clip_low = {"clip_low", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_55clip_low, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_55clip_low(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_x = 0; PyObject *__pyx_v_value = 0; PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("clip_low (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,&__pyx_n_s_value,&__pyx_n_s_inplace,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_False)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("clip_low", 0, 3, 4, 1); __PYX_ERR(0, 317, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_value)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("clip_low", 0, 3, 4, 2); __PYX_ERR(0, 317, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "clip_low") < 0)) __PYX_ERR(0, 317, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_x = values[1]; __pyx_v_value = values[2]; __pyx_v_inplace = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("clip_low", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 317, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.clip_low", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_54clip_low(__pyx_self, __pyx_v_self, __pyx_v_x, __pyx_v_value, __pyx_v_inplace); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_54clip_low(CYTHON_UNUSED PyObject 
*__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_value, PyObject *__pyx_v_inplace) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__43) __Pyx_RefNannySetupContext("clip_low", 0); __Pyx_TraceCall("clip_low", __pyx_f[0], 317, 0, __PYX_ERR(0, 317, __pyx_L1_error)); /* "thinc/neural/ops.pyx":318 * * def clip_low(self, x, value, inplace=False): * if inplace: # <<<<<<<<<<<<<< * return self.xp.maximum(x, value, out=x) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 318, __pyx_L1_error) if (__pyx_t_1) { /* "thinc/neural/ops.pyx":319 * def clip_low(self, x, value, inplace=False): * if inplace: * return self.xp.maximum(x, value, out=x) # <<<<<<<<<<<<<< * else: * return self.xp.maximum(x, value) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_maximum); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_x); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_value); __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_out, __pyx_v_x) < 0) __PYX_ERR(0, 319, __pyx_L1_error) __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":318 * * def clip_low(self, x, value, inplace=False): * if inplace: # <<<<<<<<<<<<<< * return self.xp.maximum(x, value, out=x) * else: */ } /* "thinc/neural/ops.pyx":321 * return self.xp.maximum(x, value, out=x) * else: * return self.xp.maximum(x, value) # <<<<<<<<<<<<<< * * def take_which(self, x, which, axis=-1): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 321, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_maximum); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 321, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; __pyx_t_6 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_6 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_x, __pyx_v_value}; __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_6, 
2+__pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 321, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_5); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_x, __pyx_v_value}; __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 321, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_5); } else #endif { __pyx_t_3 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 321, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_3, 0+__pyx_t_6, __pyx_v_x); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_3, 1+__pyx_t_6, __pyx_v_value); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 321, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; } /* "thinc/neural/ops.pyx":317 * return self.xp.expand_dims(a, axis=axis) * * def clip_low(self, x, value, inplace=False): # <<<<<<<<<<<<<< * if inplace: * return self.xp.maximum(x, value, out=x) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.Ops.clip_low", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":323 * return self.xp.maximum(x, value) * * def take_which(self, x, which, axis=-1): # <<<<<<<<<<<<<< * output = self.allocate(which.shape) * for i in range(x.shape[axis]): */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_57take_which(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_57take_which = {"take_which", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_57take_which, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_57take_which(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_x = 0; PyObject *__pyx_v_which = 0; PyObject *__pyx_v_axis = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("take_which (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,&__pyx_n_s_which,&__pyx_n_s_axis,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)__pyx_int_neg_1)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 
0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("take_which", 0, 3, 4, 1); __PYX_ERR(0, 323, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_which)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("take_which", 0, 3, 4, 2); __PYX_ERR(0, 323, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "take_which") < 0)) __PYX_ERR(0, 323, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_x = values[1]; __pyx_v_which = values[2]; __pyx_v_axis = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("take_which", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 323, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.take_which", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_56take_which(__pyx_self, __pyx_v_self, __pyx_v_x, __pyx_v_which, __pyx_v_axis); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_56take_which(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_which, PyObject *__pyx_v_axis) { PyObject *__pyx_v_output = NULL; PyObject *__pyx_v_i = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__44) __Pyx_RefNannySetupContext("take_which", 0); __Pyx_TraceCall("take_which", __pyx_f[0], 323, 0, __PYX_ERR(0, 323, __pyx_L1_error)); /* "thinc/neural/ops.pyx":324 * * def take_which(self, x, which, axis=-1): * output = self.allocate(which.shape) # <<<<<<<<<<<<<< * for i in range(x.shape[axis]): * output += x[:,:,i] * (which == i) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 324, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_which, __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 324, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 324, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_output = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":325 * def take_which(self, x, which, axis=-1): * output = self.allocate(which.shape) * for i in range(x.shape[axis]): # <<<<<<<<<<<<<< * output += x[:,:,i] * (which == i) * return output */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_axis); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (likely(PyList_CheckExact(__pyx_t_1)) || PyTuple_CheckExact(__pyx_t_1)) { __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 325, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_5); __Pyx_INCREF(__pyx_t_1); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(0, 325, __pyx_L1_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_5); __Pyx_INCREF(__pyx_t_1); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(0, 325, __pyx_L1_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif } } else { __pyx_t_1 = __pyx_t_6(__pyx_t_2); if (unlikely(!__pyx_t_1)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 325, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_1); } __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":326 * output = self.allocate(which.shape) * for i in range(x.shape[axis]): * output += x[:,:,i] * (which == i) # <<<<<<<<<<<<<< * return output * */ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 326, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_slice__3); __Pyx_INCREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_slice__3); __Pyx_INCREF(__pyx_v_i); __Pyx_GIVEREF(__pyx_v_i); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_i); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 326, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_RichCompare(__pyx_v_which, __pyx_v_i, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 326, __pyx_L1_error) __pyx_t_4 = PyNumber_Multiply(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 326, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_InPlaceAdd(__pyx_v_output, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 326, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF_SET(__pyx_v_output, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":325 * def take_which(self, x, which, axis=-1): * output = self.allocate(which.shape) * for i in range(x.shape[axis]): # <<<<<<<<<<<<<< * output += x[:,:,i] * (which == i) * return output */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":327 * for i in range(x.shape[axis]): * output += x[:,:,i] * (which == i) * return output # <<<<<<<<<<<<<< * * def backprop_take(self, dX__bo, which__bo, nP): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_output); __pyx_r = __pyx_v_output; goto __pyx_L0; /* "thinc/neural/ops.pyx":323 * return self.xp.maximum(x, value) * * def take_which(self, x, which, axis=-1): # <<<<<<<<<<<<<< * output = self.allocate(which.shape) * for i in range(x.shape[axis]): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.Ops.take_which", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_output); __Pyx_XDECREF(__pyx_v_i); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":329 * return output * * def backprop_take(self, dX__bo, which__bo, nP): # <<<<<<<<<<<<<< * dX__bop = self.allocate((dX__bo.shape[0], dX__bo.shape[1], nP)) * for i in range(nP): */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_59backprop_take(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_59backprop_take = {"backprop_take", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_59backprop_take, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_59backprop_take(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_dX__bo = 0; PyObject *__pyx_v_which__bo = 0; PyObject *__pyx_v_nP = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_take (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_dX__bo,&__pyx_n_s_which__bo,&__pyx_n_s_nP,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = 
PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dX__bo)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_take", 1, 4, 4, 1); __PYX_ERR(0, 329, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_which__bo)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_take", 1, 4, 4, 2); __PYX_ERR(0, 329, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nP)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_take", 1, 4, 4, 3); __PYX_ERR(0, 329, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_take") < 0)) __PYX_ERR(0, 329, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_self = values[0]; __pyx_v_dX__bo = values[1]; __pyx_v_which__bo = values[2]; __pyx_v_nP = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_take", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 329, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_take", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_58backprop_take(__pyx_self, __pyx_v_self, __pyx_v_dX__bo, __pyx_v_which__bo, __pyx_v_nP); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_58backprop_take(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_dX__bo, PyObject *__pyx_v_which__bo, PyObject *__pyx_v_nP) { PyObject *__pyx_v_dX__bop = NULL; PyObject *__pyx_v_i = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; PyObject *(*__pyx_t_7)(PyObject *); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__45) __Pyx_RefNannySetupContext("backprop_take", 0); __Pyx_TraceCall("backprop_take", __pyx_f[0], 329, 0, __PYX_ERR(0, 329, __pyx_L1_error)); /* "thinc/neural/ops.pyx":330 * * def backprop_take(self, dX__bo, which__bo, nP): * dX__bop = self.allocate((dX__bo.shape[0], dX__bo.shape[1], nP)) # <<<<<<<<<<<<<< * for i in range(nP): * dX__bop[:, :, i] += dX__bo * (which__bo == i) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dX__bo, __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_3, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); 
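/* ops.pyx:330 continues below: the generated code fetches dX__bo.shape[1], packs
 * (dX__bo.shape[0], dX__bo.shape[1], nP) into a 3-tuple, and calls self.allocate()
 * with it -- via the bound-method fast path when CYTHON_UNPACK_METHODS is enabled --
 * before binding the result to __pyx_v_dX__bop. */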
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dX__bo, __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_3, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); __Pyx_INCREF(__pyx_v_nP); __Pyx_GIVEREF(__pyx_v_nP); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_nP); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_dX__bop = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":331 * def backprop_take(self, dX__bo, which__bo, nP): * dX__bop = self.allocate((dX__bo.shape[0], dX__bo.shape[1], nP)) * for i in range(nP): # <<<<<<<<<<<<<< * dX__bop[:, :, i] += dX__bo * (which__bo == i) * return dX__bop */ __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_v_nP); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 331, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (likely(PyList_CheckExact(__pyx_t_1)) || PyTuple_CheckExact(__pyx_t_1)) { __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); __pyx_t_6 = 0; __pyx_t_7 = NULL; } else { __pyx_t_6 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 331, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_7 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 331, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; for (;;) { if (likely(!__pyx_t_7)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_6); __Pyx_INCREF(__pyx_t_1); __pyx_t_6++; if (unlikely(0 < 0)) __PYX_ERR(0, 331, __pyx_L1_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 331, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif } else { if (__pyx_t_6 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_6); __Pyx_INCREF(__pyx_t_1); __pyx_t_6++; if (unlikely(0 < 0)) __PYX_ERR(0, 331, __pyx_L1_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 331, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif } } else { __pyx_t_1 = __pyx_t_7(__pyx_t_2); if (unlikely(!__pyx_t_1)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 331, __pyx_L1_error) } break; } 
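/* tp_iternext produced an item here: __pyx_t_1 becomes the loop variable i of
 * "for i in range(nP)" (ops.pyx:331). The NULL branch just above distinguishes
 * normal exhaustion (StopIteration is cleared and the loop breaks) from a real
 * error, which jumps to __pyx_L1_error. */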
__Pyx_GOTREF(__pyx_t_1); } __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":332 * dX__bop = self.allocate((dX__bo.shape[0], dX__bo.shape[1], nP)) * for i in range(nP): * dX__bop[:, :, i] += dX__bo * (which__bo == i) # <<<<<<<<<<<<<< * return dX__bop * */ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 332, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_slice__3); __Pyx_INCREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_slice__3); __Pyx_INCREF(__pyx_v_i); __Pyx_GIVEREF(__pyx_v_i); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_i); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_dX__bop, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 332, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_which__bo, __pyx_v_i, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 332, __pyx_L1_error) __pyx_t_4 = PyNumber_Multiply(__pyx_v_dX__bo, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 332, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyNumber_InPlaceAdd(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 332, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(PyObject_SetItem(__pyx_v_dX__bop, __pyx_t_1, __pyx_t_5) < 0)) __PYX_ERR(0, 332, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":331 * def backprop_take(self, dX__bo, which__bo, nP): * dX__bop = self.allocate((dX__bo.shape[0], dX__bo.shape[1], nP)) * for i in range(nP): # <<<<<<<<<<<<<< * dX__bop[:, :, i] += dX__bo * (which__bo == i) * return dX__bop */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":333 * for i in range(nP): * dX__bop[:, :, i] += dX__bo * (which__bo == i) * return dX__bop # <<<<<<<<<<<<<< * * def lstm(self, output, cells, act_pieces, prev): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_dX__bop); __pyx_r = __pyx_v_dX__bop; goto __pyx_L0; /* "thinc/neural/ops.pyx":329 * return output * * def backprop_take(self, dX__bo, which__bo, nP): # <<<<<<<<<<<<<< * dX__bop = self.allocate((dX__bo.shape[0], dX__bo.shape[1], nP)) * for i in range(nP): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_take", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dX__bop); __Pyx_XDECREF(__pyx_v_i); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":335 * return dX__bop * * def lstm(self, output, cells, act_pieces, prev): # <<<<<<<<<<<<<< * hf, hi, ho, hc = act_pieces * hf[:] = self.sigmoid(hf) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_61lstm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_61lstm = {"lstm", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_61lstm, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_61lstm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject 
*__pyx_v_output = 0; PyObject *__pyx_v_cells = 0; PyObject *__pyx_v_act_pieces = 0; PyObject *__pyx_v_prev = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lstm (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_output,&__pyx_n_s_cells,&__pyx_n_s_act_pieces,&__pyx_n_s_prev,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_output)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("lstm", 1, 5, 5, 1); __PYX_ERR(0, 335, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cells)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("lstm", 1, 5, 5, 2); __PYX_ERR(0, 335, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_act_pieces)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("lstm", 1, 5, 5, 3); __PYX_ERR(0, 335, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_prev)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("lstm", 1, 5, 5, 4); __PYX_ERR(0, 335, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "lstm") < 0)) __PYX_ERR(0, 335, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); } __pyx_v_self = values[0]; __pyx_v_output = values[1]; __pyx_v_cells = values[2]; __pyx_v_act_pieces = values[3]; __pyx_v_prev = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("lstm", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 335, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.lstm", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_60lstm(__pyx_self, __pyx_v_self, __pyx_v_output, __pyx_v_cells, __pyx_v_act_pieces, __pyx_v_prev); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_60lstm(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_output, PyObject *__pyx_v_cells, PyObject *__pyx_v_act_pieces, PyObject *__pyx_v_prev) { PyObject *__pyx_v_hf = NULL; PyObject *__pyx_v_hi = 
NULL; PyObject *__pyx_v_ho = NULL; PyObject *__pyx_v_hc = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__46) __Pyx_RefNannySetupContext("lstm", 0); __Pyx_TraceCall("lstm", __pyx_f[0], 335, 0, __PYX_ERR(0, 335, __pyx_L1_error)); /* "thinc/neural/ops.pyx":336 * * def lstm(self, output, cells, act_pieces, prev): * hf, hi, ho, hc = act_pieces # <<<<<<<<<<<<<< * hf[:] = self.sigmoid(hf) * hi[:] = self.sigmoid(hi) */ if ((likely(PyTuple_CheckExact(__pyx_v_act_pieces))) || (PyList_CheckExact(__pyx_v_act_pieces))) { PyObject* sequence = __pyx_v_act_pieces; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 4)) { if (size > 4) __Pyx_RaiseTooManyValuesError(4); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 336, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 2); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 3); } else { __pyx_t_1 = PyList_GET_ITEM(sequence, 0); __pyx_t_2 = PyList_GET_ITEM(sequence, 1); __pyx_t_3 = PyList_GET_ITEM(sequence, 2); __pyx_t_4 = PyList_GET_ITEM(sequence, 3); } __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else { Py_ssize_t i; PyObject** temps[4] = {&__pyx_t_1,&__pyx_t_2,&__pyx_t_3,&__pyx_t_4}; for (i=0; i < 4; i++) { PyObject* item = PySequence_ITEM(sequence, i); if (unlikely(!item)) __PYX_ERR(0, 336, __pyx_L1_error) __Pyx_GOTREF(item); *(temps[i]) = item; } } #endif } else { Py_ssize_t index = -1; PyObject** temps[4] = {&__pyx_t_1,&__pyx_t_2,&__pyx_t_3,&__pyx_t_4}; __pyx_t_5 = PyObject_GetIter(__pyx_v_act_pieces); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 336, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; for (index=0; index < 4; index++) { PyObject* item = __pyx_t_6(__pyx_t_5); if (unlikely(!item)) goto __pyx_L3_unpacking_failed; __Pyx_GOTREF(item); *(temps[index]) = item; } if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 4) < 0) __PYX_ERR(0, 336, __pyx_L1_error) __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L4_unpacking_done; __pyx_L3_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 336, __pyx_L1_error) __pyx_L4_unpacking_done:; } __pyx_v_hf = __pyx_t_1; __pyx_t_1 = 0; __pyx_v_hi = __pyx_t_2; __pyx_t_2 = 0; __pyx_v_ho = __pyx_t_3; __pyx_t_3 = 0; __pyx_v_hc = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":337 * def lstm(self, output, cells, act_pieces, prev): * hf, hi, ho, hc = act_pieces * hf[:] = self.sigmoid(hf) # <<<<<<<<<<<<<< * hi[:] = self.sigmoid(hi) * ho[:] = self.sigmoid(ho) */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_sigmoid); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 337, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); 
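/* "hf[:] = self.sigmoid(hf)" (ops.pyx:337): self.sigmoid was looked up into __pyx_t_3;
 * with CYTHON_UNPACK_METHODS enabled and a plain bound method, __pyx_t_2 now holds its
 * self object. The underlying function replaces the method just below so the call can
 * run as __Pyx_PyObject_Call2Args(function, self, hf) without building an argument
 * tuple, and the result is written back via __Pyx_PyObject_SetSlice to implement the
 * full-slice assignment. The same pattern repeats for hi and ho (self.sigmoid) and for
 * hc (self.xp.tanh). */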
__Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_4 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_hf) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_hf); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 337, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__Pyx_PyObject_SetSlice(__pyx_v_hf, __pyx_t_4, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 337, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":338 * hf, hi, ho, hc = act_pieces * hf[:] = self.sigmoid(hf) * hi[:] = self.sigmoid(hi) # <<<<<<<<<<<<<< * ho[:] = self.sigmoid(ho) * hc[:] = self.xp.tanh(hc) */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_sigmoid); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_4 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_hi) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_hi); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__Pyx_PyObject_SetSlice(__pyx_v_hi, __pyx_t_4, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 338, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":339 * hf[:] = self.sigmoid(hf) * hi[:] = self.sigmoid(hi) * ho[:] = self.sigmoid(ho) # <<<<<<<<<<<<<< * hc[:] = self.xp.tanh(hc) * */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_sigmoid); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_4 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_ho) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_ho); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__Pyx_PyObject_SetSlice(__pyx_v_ho, __pyx_t_4, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 339, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":340 * hi[:] = self.sigmoid(hi) * ho[:] = self.sigmoid(ho) * hc[:] = self.xp.tanh(hc) # <<<<<<<<<<<<<< * * cells[:] = hf * prev + hi * hc */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_tanh); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_4 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_hc) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_hc); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__Pyx_PyObject_SetSlice(__pyx_v_hc, __pyx_t_4, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":342 * hc[:] = self.xp.tanh(hc) * * cells[:] = hf * prev + hi * hc # <<<<<<<<<<<<<< * output[:] = self.xp.tanh(cells) * ho * */ __pyx_t_4 = PyNumber_Multiply(__pyx_v_hf, __pyx_v_prev); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyNumber_Multiply(__pyx_v_hi, __pyx_v_hc); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Add(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__Pyx_PyObject_SetSlice(__pyx_v_cells, __pyx_t_3, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 342, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":343 * * cells[:] = hf * prev + hi * hc * output[:] = self.xp.tanh(cells) * ho # <<<<<<<<<<<<<< * * def backprop_lstm(self, d_cells, d_prev, d_gate_pieces, */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 343, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_tanh); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 343, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_3 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_2, __pyx_v_cells) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_cells); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 343, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyNumber_Multiply(__pyx_t_3, __pyx_v_ho); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 343, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__Pyx_PyObject_SetSlice(__pyx_v_output, __pyx_t_4, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 343, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":335 * return dX__bop * * def lstm(self, output, cells, act_pieces, prev): # <<<<<<<<<<<<<< * hf, hi, ho, hc = act_pieces * hf[:] = self.sigmoid(hf) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.Ops.lstm", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_hf); __Pyx_XDECREF(__pyx_v_hi); __Pyx_XDECREF(__pyx_v_ho); __Pyx_XDECREF(__pyx_v_hc); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":345 * output[:] = self.xp.tanh(cells) * ho * * def backprop_lstm(self, d_cells, d_prev, d_gate_pieces, # <<<<<<<<<<<<<< * d_output, gate_pieces, cells, prev): * hf, hi, ho, hc = gate_pieces */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_63backprop_lstm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_63backprop_lstm = {"backprop_lstm", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_63backprop_lstm, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_63backprop_lstm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_d_cells = 0; PyObject *__pyx_v_d_prev = 0; PyObject *__pyx_v_d_gate_pieces = 0; PyObject *__pyx_v_d_output = 0; PyObject *__pyx_v_gate_pieces = 0; PyObject *__pyx_v_cells = 0; PyObject *__pyx_v_prev = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_lstm (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_d_cells,&__pyx_n_s_d_prev,&__pyx_n_s_d_gate_pieces,&__pyx_n_s_d_output,&__pyx_n_s_gate_pieces,&__pyx_n_s_cells,&__pyx_n_s_prev,0}; PyObject* values[8] = {0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto 
__pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d_cells)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_lstm", 1, 8, 8, 1); __PYX_ERR(0, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d_prev)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_lstm", 1, 8, 8, 2); __PYX_ERR(0, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d_gate_pieces)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_lstm", 1, 8, 8, 3); __PYX_ERR(0, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d_output)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_lstm", 1, 8, 8, 4); __PYX_ERR(0, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gate_pieces)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_lstm", 1, 8, 8, 5); __PYX_ERR(0, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 6: if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cells)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_lstm", 1, 8, 8, 6); __PYX_ERR(0, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 7: if (likely((values[7] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_prev)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_lstm", 1, 8, 8, 7); __PYX_ERR(0, 345, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_lstm") < 0)) __PYX_ERR(0, 345, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 8) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); } __pyx_v_self = values[0]; __pyx_v_d_cells = values[1]; __pyx_v_d_prev = values[2]; __pyx_v_d_gate_pieces = values[3]; __pyx_v_d_output = values[4]; __pyx_v_gate_pieces = values[5]; __pyx_v_cells = values[6]; __pyx_v_prev = values[7]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_lstm", 1, 8, 8, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_lstm", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_62backprop_lstm(__pyx_self, __pyx_v_self, __pyx_v_d_cells, __pyx_v_d_prev, __pyx_v_d_gate_pieces, __pyx_v_d_output, __pyx_v_gate_pieces, __pyx_v_cells, __pyx_v_prev); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_62backprop_lstm(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_d_cells, PyObject *__pyx_v_d_prev, PyObject *__pyx_v_d_gate_pieces, 
PyObject *__pyx_v_d_output, PyObject *__pyx_v_gate_pieces, PyObject *__pyx_v_cells, PyObject *__pyx_v_prev) { PyObject *__pyx_v_hf = NULL; PyObject *__pyx_v_hi = NULL; PyObject *__pyx_v_ho = NULL; PyObject *__pyx_v_hc = NULL; PyObject *__pyx_v_dhf = NULL; PyObject *__pyx_v_dhi = NULL; PyObject *__pyx_v_dho = NULL; PyObject *__pyx_v_dhc = NULL; PyObject *__pyx_v_ct = NULL; PyObject *__pyx_v_dc = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__47) __Pyx_RefNannySetupContext("backprop_lstm", 0); __Pyx_TraceCall("backprop_lstm", __pyx_f[0], 345, 0, __PYX_ERR(0, 345, __pyx_L1_error)); /* "thinc/neural/ops.pyx":347 * def backprop_lstm(self, d_cells, d_prev, d_gate_pieces, * d_output, gate_pieces, cells, prev): * hf, hi, ho, hc = gate_pieces # <<<<<<<<<<<<<< * dhf, dhi, dho, dhc = d_gate_pieces * */ if ((likely(PyTuple_CheckExact(__pyx_v_gate_pieces))) || (PyList_CheckExact(__pyx_v_gate_pieces))) { PyObject* sequence = __pyx_v_gate_pieces; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 4)) { if (size > 4) __Pyx_RaiseTooManyValuesError(4); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 347, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 2); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 3); } else { __pyx_t_1 = PyList_GET_ITEM(sequence, 0); __pyx_t_2 = PyList_GET_ITEM(sequence, 1); __pyx_t_3 = PyList_GET_ITEM(sequence, 2); __pyx_t_4 = PyList_GET_ITEM(sequence, 3); } __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else { Py_ssize_t i; PyObject** temps[4] = {&__pyx_t_1,&__pyx_t_2,&__pyx_t_3,&__pyx_t_4}; for (i=0; i < 4; i++) { PyObject* item = PySequence_ITEM(sequence, i); if (unlikely(!item)) __PYX_ERR(0, 347, __pyx_L1_error) __Pyx_GOTREF(item); *(temps[i]) = item; } } #endif } else { Py_ssize_t index = -1; PyObject** temps[4] = {&__pyx_t_1,&__pyx_t_2,&__pyx_t_3,&__pyx_t_4}; __pyx_t_5 = PyObject_GetIter(__pyx_v_gate_pieces); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 347, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; for (index=0; index < 4; index++) { PyObject* item = __pyx_t_6(__pyx_t_5); if (unlikely(!item)) goto __pyx_L3_unpacking_failed; __Pyx_GOTREF(item); *(temps[index]) = item; } if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 4) < 0) __PYX_ERR(0, 347, __pyx_L1_error) __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L4_unpacking_done; __pyx_L3_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 347, __pyx_L1_error) __pyx_L4_unpacking_done:; } __pyx_v_hf = __pyx_t_1; __pyx_t_1 = 0; __pyx_v_hi = __pyx_t_2; __pyx_t_2 = 0; __pyx_v_ho = __pyx_t_3; __pyx_t_3 = 0; __pyx_v_hc = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":348 * d_output, gate_pieces, cells, prev): * hf, hi, ho, hc = gate_pieces * dhf, dhi, dho, dhc = d_gate_pieces # <<<<<<<<<<<<<< * * ct = self.xp.tanh(cells) */ if 
((likely(PyTuple_CheckExact(__pyx_v_d_gate_pieces))) || (PyList_CheckExact(__pyx_v_d_gate_pieces))) { PyObject* sequence = __pyx_v_d_gate_pieces; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 4)) { if (size > 4) __Pyx_RaiseTooManyValuesError(4); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 348, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); __pyx_t_2 = PyTuple_GET_ITEM(sequence, 2); __pyx_t_1 = PyTuple_GET_ITEM(sequence, 3); } else { __pyx_t_4 = PyList_GET_ITEM(sequence, 0); __pyx_t_3 = PyList_GET_ITEM(sequence, 1); __pyx_t_2 = PyList_GET_ITEM(sequence, 2); __pyx_t_1 = PyList_GET_ITEM(sequence, 3); } __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_1); #else { Py_ssize_t i; PyObject** temps[4] = {&__pyx_t_4,&__pyx_t_3,&__pyx_t_2,&__pyx_t_1}; for (i=0; i < 4; i++) { PyObject* item = PySequence_ITEM(sequence, i); if (unlikely(!item)) __PYX_ERR(0, 348, __pyx_L1_error) __Pyx_GOTREF(item); *(temps[i]) = item; } } #endif } else { Py_ssize_t index = -1; PyObject** temps[4] = {&__pyx_t_4,&__pyx_t_3,&__pyx_t_2,&__pyx_t_1}; __pyx_t_5 = PyObject_GetIter(__pyx_v_d_gate_pieces); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 348, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; for (index=0; index < 4; index++) { PyObject* item = __pyx_t_6(__pyx_t_5); if (unlikely(!item)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(item); *(temps[index]) = item; } if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 4) < 0) __PYX_ERR(0, 348, __pyx_L1_error) __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 348, __pyx_L1_error) __pyx_L6_unpacking_done:; } __pyx_v_dhf = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_dhi = __pyx_t_3; __pyx_t_3 = 0; __pyx_v_dho = __pyx_t_2; __pyx_t_2 = 0; __pyx_v_dhc = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":350 * dhf, dhi, dho, dhc = d_gate_pieces * * ct = self.xp.tanh(cells) # <<<<<<<<<<<<<< * * # Gradient for ho and c in h = sigmoid(ho) * tanh(c) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 350, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_tanh); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 350, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_cells) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_cells); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 350, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_ct = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":353 * * # Gradient for ho and c in h = sigmoid(ho) * tanh(c) * dho[:] = ct * d_output * self.dsigmoid(ho) # <<<<<<<<<<<<<< * dc = ho * d_output * self.dtanh(ct) * dc += d_cells # Carry gradient from previous step */ __pyx_t_1 = PyNumber_Multiply(__pyx_v_ct, __pyx_v_d_output); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dsigmoid); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v_ho) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_ho); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Multiply(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__Pyx_PyObject_SetSlice(__pyx_v_dho, __pyx_t_2, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 353, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":354 * # Gradient for ho and c in h = sigmoid(ho) * tanh(c) * dho[:] = ct * d_output * self.dsigmoid(ho) * dc = ho * d_output * self.dtanh(ct) # <<<<<<<<<<<<<< * dc += d_cells # Carry gradient from previous step * */ __pyx_t_2 = PyNumber_Multiply(__pyx_v_ho, __pyx_v_d_output); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 354, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dtanh); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 354, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_4, __pyx_v_ct) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_ct); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 354, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Multiply(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 354, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_dc = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":355 * dho[:] = ct * d_output * self.dsigmoid(ho) * dc = ho * d_output * self.dtanh(ct) * dc += d_cells # Carry gradient from previous step # <<<<<<<<<<<<<< * * # Gradient for hf, hi, hc, prev[i] */ __pyx_t_1 = PyNumber_InPlaceAdd(__pyx_v_dc, __pyx_v_d_cells); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 355, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_dc, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":359 * # Gradient for hf, hi, hc, prev[i] * # in c = sigmoid(hf) * prev[i] + sigmoid(hi) * tanh(hc) * dhf[:] = self.dsigmoid(hf) * dc * prev # <<<<<<<<<<<<<< * dhi[:] = self.dsigmoid(hi) * dc * hc * dhc[:] = self.dtanh(hc) * dc * hi */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dsigmoid); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 359, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_hf) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_hf); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 359, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_v_dc); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 359, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Multiply(__pyx_t_3, __pyx_v_prev); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 359, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__Pyx_PyObject_SetSlice(__pyx_v_dhf, __pyx_t_1, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 359, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":360 * # in c = sigmoid(hf) * prev[i] + sigmoid(hi) * tanh(hc) * dhf[:] = self.dsigmoid(hf) * dc * prev * dhi[:] = self.dsigmoid(hi) * dc * hc # <<<<<<<<<<<<<< * dhc[:] = self.dtanh(hc) * dc * hi * */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dsigmoid); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_hi) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_hi); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_v_dc); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Multiply(__pyx_t_3, __pyx_v_hc); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__Pyx_PyObject_SetSlice(__pyx_v_dhi, __pyx_t_1, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 360, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":361 * dhf[:] = self.dsigmoid(hf) * dc * prev * dhi[:] = self.dsigmoid(hi) * dc * hc * dhc[:] = self.dtanh(hc) * dc * hi # <<<<<<<<<<<<<< * * d_prev[:] = dc * hf */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dtanh); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_hc) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_hc); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_v_dc); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Multiply(__pyx_t_3, __pyx_v_hi); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__Pyx_PyObject_SetSlice(__pyx_v_dhc, __pyx_t_1, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 361, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":363 * dhc[:] = self.dtanh(hc) * dc * hi * * d_prev[:] = dc * hf # <<<<<<<<<<<<<< * copy_array(d_cells, dc) * */ __pyx_t_1 = PyNumber_Multiply(__pyx_v_dc, __pyx_v_hf); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 363, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__Pyx_PyObject_SetSlice(__pyx_v_d_prev, __pyx_t_1, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 363, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":364 * * d_prev[:] = dc * hf * copy_array(d_cells, dc) # <<<<<<<<<<<<<< * * def softplus(self, X, threshold=20., out=None): */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_copy_array); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 364, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_d_cells, __pyx_v_dc}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, 
__pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 364, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_d_cells, __pyx_v_dc}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 364, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_4 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 364, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __pyx_t_2 = NULL; } __Pyx_INCREF(__pyx_v_d_cells); __Pyx_GIVEREF(__pyx_v_d_cells); PyTuple_SET_ITEM(__pyx_t_4, 0+__pyx_t_7, __pyx_v_d_cells); __Pyx_INCREF(__pyx_v_dc); __Pyx_GIVEREF(__pyx_v_dc); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_7, __pyx_v_dc); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 364, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":345 * output[:] = self.xp.tanh(cells) * ho * * def backprop_lstm(self, d_cells, d_prev, d_gate_pieces, # <<<<<<<<<<<<<< * d_output, gate_pieces, cells, prev): * hf, hi, ho, hc = gate_pieces */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_lstm", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_hf); __Pyx_XDECREF(__pyx_v_hi); __Pyx_XDECREF(__pyx_v_ho); __Pyx_XDECREF(__pyx_v_hc); __Pyx_XDECREF(__pyx_v_dhf); __Pyx_XDECREF(__pyx_v_dhi); __Pyx_XDECREF(__pyx_v_dho); __Pyx_XDECREF(__pyx_v_dhc); __Pyx_XDECREF(__pyx_v_ct); __Pyx_XDECREF(__pyx_v_dc); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":366 * copy_array(d_cells, dc) * * def softplus(self, X, threshold=20., out=None): # <<<<<<<<<<<<<< * xp = get_array_module(X) * log1p_exp = xp.log1p(xp.exp(X)) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_65softplus(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_65softplus = {"softplus", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_65softplus, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_65softplus(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_threshold = 0; PyObject *__pyx_v_out = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("softplus (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_threshold,&__pyx_n_s_out,0}; PyObject* values[4] = {0,0,0,0}; values[2] = ((PyObject *)((PyObject*)__pyx_float_20_)); values[3] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = 
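/* Illustrative note (not Cython output): the generated block above corresponds to the
 * backprop_lstm gate gradients quoted from ops.pyx lines 353-364.  A minimal NumPy-style
 * sketch of the same chain rule, assuming ct = xp.tanh(cells) (per the forward relation
 * quoted at line 344) and that dsigmoid/dtanh are the elementwise derivative helpers
 * defined elsewhere on this class:
 *
 *     # h = sigmoid(ho) * tanh(c)
 *     dho[:] = ct * d_output * dsigmoid(ho)
 *     dc = ho * d_output * dtanh(ct)
 *     dc += d_cells                      # carry gradient from previous step
 *     # c = sigmoid(hf) * prev + sigmoid(hi) * tanh(hc)
 *     dhf[:] = dsigmoid(hf) * dc * prev
 *     dhi[:] = dsigmoid(hi) * dc * hc
 *     dhc[:] = dtanh(hc) * dc * hi
 *     d_prev[:] = dc * hf
 *     copy_array(d_cells, dc)            # write the carried cell gradient back
 */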
PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("softplus", 0, 2, 4, 1); __PYX_ERR(0, 366, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "softplus") < 0)) __PYX_ERR(0, 366, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = values[1]; __pyx_v_threshold = values[2]; __pyx_v_out = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("softplus", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 366, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.softplus", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_64softplus(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_threshold, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_64softplus(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out) { PyObject *__pyx_v_xp = NULL; PyObject *__pyx_v_log1p_exp = NULL; PyObject *__pyx_v_indices = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__48) __Pyx_RefNannySetupContext("softplus", 0); __Pyx_TraceCall("softplus", __pyx_f[0], 366, 0, __PYX_ERR(0, 366, __pyx_L1_error)); /* "thinc/neural/ops.pyx":367 * * def softplus(self, X, threshold=20., out=None): * xp = get_array_module(X) # <<<<<<<<<<<<<< * log1p_exp = xp.log1p(xp.exp(X)) * indices = X >= threshold */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_get_array_module); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 367, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && 
unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_X) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_X); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 367, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_xp = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":368 * def softplus(self, X, threshold=20., out=None): * xp = get_array_module(X) * log1p_exp = xp.log1p(xp.exp(X)) # <<<<<<<<<<<<<< * indices = X >= threshold * log1p_exp[indices] = X[indices] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_log1p); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 368, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_exp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 368, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_v_X) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_X); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 368, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 368, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_log1p_exp = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":369 * xp = get_array_module(X) * log1p_exp = xp.log1p(xp.exp(X)) * indices = X >= threshold # <<<<<<<<<<<<<< * log1p_exp[indices] = X[indices] * if out is None: */ __pyx_t_1 = PyObject_RichCompare(__pyx_v_X, __pyx_v_threshold, Py_GE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 369, __pyx_L1_error) __pyx_v_indices = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":370 * log1p_exp = xp.log1p(xp.exp(X)) * indices = X >= threshold * log1p_exp[indices] = X[indices] # <<<<<<<<<<<<<< * if out is None: * return log1p_exp */ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_X, __pyx_v_indices); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 370, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_v_log1p_exp, __pyx_v_indices, __pyx_t_1) < 0)) __PYX_ERR(0, 370, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":371 * indices = X >= threshold * log1p_exp[indices] = X[indices] * if out is None: # <<<<<<<<<<<<<< * return log1p_exp * else: */ __pyx_t_6 = (__pyx_v_out == Py_None); __pyx_t_7 = (__pyx_t_6 != 0); if (__pyx_t_7) { /* "thinc/neural/ops.pyx":372 * log1p_exp[indices] = X[indices] * if out is None: * return log1p_exp # <<<<<<<<<<<<<< * else: * out[:] = log1p_exp */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_log1p_exp); __pyx_r = __pyx_v_log1p_exp; goto __pyx_L0; /* "thinc/neural/ops.pyx":371 * indices = X >= threshold * log1p_exp[indices] = X[indices] * if out is None: # <<<<<<<<<<<<<< * return log1p_exp * else: */ } /* "thinc/neural/ops.pyx":374 * return log1p_exp * else: * out[:] = log1p_exp # <<<<<<<<<<<<<< * return out * */ /*else*/ { if (__Pyx_PyObject_SetSlice(__pyx_v_out, __pyx_v_log1p_exp, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 374, __pyx_L1_error) /* "thinc/neural/ops.pyx":375 * else: * out[:] = log1p_exp * return out # <<<<<<<<<<<<<< * * def backprop_softplus(self, dY, X, threshold=20, out=None): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_out); __pyx_r = __pyx_v_out; goto __pyx_L0; } /* "thinc/neural/ops.pyx":366 * copy_array(d_cells, dc) * * def softplus(self, X, threshold=20., out=None): # <<<<<<<<<<<<<< * xp = get_array_module(X) * log1p_exp = xp.log1p(xp.exp(X)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.Ops.softplus", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_xp); __Pyx_XDECREF(__pyx_v_log1p_exp); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":377 * return out * * def backprop_softplus(self, dY, X, threshold=20, out=None): # <<<<<<<<<<<<<< * xp = get_array_module(X) * if out is None: */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_67backprop_softplus(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_67backprop_softplus = {"backprop_softplus", 
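/* Illustrative note (not Cython output): the softplus implementation above follows the
 * quoted ops.pyx source (lines 366-375).  A minimal NumPy sketch, with np standing in
 * for the xp module returned by get_array_module and softplus_ref an illustrative name:
 *
 *     import numpy as np
 *
 *     def softplus_ref(X, threshold=20., out=None):
 *         Y = np.log1p(np.exp(X))      # softplus(x) = log(1 + exp(x))
 *         mask = X >= threshold
 *         Y[mask] = X[mask]            # for large x, softplus(x) is ~x; avoids overflow
 *         if out is None:
 *             return Y
 *         out[:] = Y
 *         return out
 */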
(PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_67backprop_softplus, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_67backprop_softplus(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_dY = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_threshold = 0; PyObject *__pyx_v_out = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_softplus (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_dY,&__pyx_n_s_X,&__pyx_n_s_threshold,&__pyx_n_s_out,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)((PyObject *)__pyx_int_20)); values[4] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_softplus", 0, 3, 5, 1); __PYX_ERR(0, 377, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_softplus", 0, 3, 5, 2); __PYX_ERR(0, 377, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_softplus") < 0)) __PYX_ERR(0, 377, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_dY = values[1]; __pyx_v_X = values[2]; __pyx_v_threshold = values[3]; __pyx_v_out = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_softplus", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 377, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_softplus", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_5thinc_6neural_3ops_3Ops_66backprop_softplus(__pyx_self, __pyx_v_self, __pyx_v_dY, __pyx_v_X, __pyx_v_threshold, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_66backprop_softplus(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_dY, PyObject *__pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out) { PyObject *__pyx_v_xp = NULL; PyObject *__pyx_v_indices = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__49) __Pyx_RefNannySetupContext("backprop_softplus", 0); __Pyx_TraceCall("backprop_softplus", __pyx_f[0], 377, 0, __PYX_ERR(0, 377, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_out); /* "thinc/neural/ops.pyx":378 * * def backprop_softplus(self, dY, X, threshold=20, out=None): * xp = get_array_module(X) # <<<<<<<<<<<<<< * if out is None: * out = xp.zeros(X.shape, dtype="f") */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_get_array_module); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 378, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_X) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_X); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 378, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_xp = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":379 * def backprop_softplus(self, dY, X, threshold=20, out=None): * xp = get_array_module(X) * if out is None: # <<<<<<<<<<<<<< * out = xp.zeros(X.shape, dtype="f") * out[:] = 1 - 1 / (1 + xp.exp(X)) */ __pyx_t_4 = (__pyx_v_out == Py_None); __pyx_t_5 = (__pyx_t_4 != 0); if (__pyx_t_5) { /* "thinc/neural/ops.pyx":380 * xp = get_array_module(X) * if out is None: * out = xp.zeros(X.shape, dtype="f") # <<<<<<<<<<<<<< * out[:] = 1 - 1 / (1 + xp.exp(X)) * out *= dY */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_n_s_f) < 0) __PYX_ERR(0, 380, __pyx_L1_error) __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_out, __pyx_t_6); 
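/* Illustrative note (not Cython output): the statements that follow evaluate the softplus
 * derivative quoted from ops.pyx lines 381-384.  Elementwise, d/dx log(1 + exp(x)) =
 * exp(x) / (1 + exp(x)) = 1 - 1 / (1 + exp(x)), i.e. the logistic sigmoid.  A minimal
 * NumPy sketch of the same backward pass (np standing in for xp, name illustrative):
 *
 *     import numpy as np
 *
 *     def backprop_softplus_ref(dY, X, threshold=20):
 *         dX = (1. - 1. / (1. + np.exp(X))) * dY   # sigmoid(X) * dY
 *         mask = X >= threshold
 *         dX[mask] = dY[mask]                      # softplus is ~identity above threshold
 *         return dX
 */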
__pyx_t_6 = 0; /* "thinc/neural/ops.pyx":379 * def backprop_softplus(self, dY, X, threshold=20, out=None): * xp = get_array_module(X) * if out is None: # <<<<<<<<<<<<<< * out = xp.zeros(X.shape, dtype="f") * out[:] = 1 - 1 / (1 + xp.exp(X)) */ } /* "thinc/neural/ops.pyx":381 * if out is None: * out = xp.zeros(X.shape, dtype="f") * out[:] = 1 - 1 / (1 + xp.exp(X)) # <<<<<<<<<<<<<< * out *= dY * indices = X >= threshold */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_exp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_6 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_X) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_X); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_AddCObj(__pyx_int_1, __pyx_t_6, 1, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyNumber_Divide(__pyx_int_1, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_SubtractCObj(__pyx_int_1, __pyx_t_6, 1, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__Pyx_PyObject_SetSlice(__pyx_v_out, __pyx_t_2, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 381, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":382 * out = xp.zeros(X.shape, dtype="f") * out[:] = 1 - 1 / (1 + xp.exp(X)) * out *= dY # <<<<<<<<<<<<<< * indices = X >= threshold * out[indices] = dY[indices] */ __pyx_t_2 = PyNumber_InPlaceMultiply(__pyx_v_out, __pyx_v_dY); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 382, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF_SET(__pyx_v_out, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":383 * out[:] = 1 - 1 / (1 + xp.exp(X)) * out *= dY * indices = X >= threshold # <<<<<<<<<<<<<< * out[indices] = dY[indices] * return out */ __pyx_t_2 = PyObject_RichCompare(__pyx_v_X, __pyx_v_threshold, Py_GE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 383, __pyx_L1_error) __pyx_v_indices = __pyx_t_2; __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":384 * out *= dY * indices = X >= threshold * out[indices] = dY[indices] # <<<<<<<<<<<<<< * return out * */ __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_dY, __pyx_v_indices); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 384, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(PyObject_SetItem(__pyx_v_out, __pyx_v_indices, __pyx_t_2) < 0)) __PYX_ERR(0, 384, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":385 * indices = X >= threshold * out[indices] = dY[indices] * return out # <<<<<<<<<<<<<< * * def mish(self, X, threshold=20., out=None): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_out); __pyx_r = __pyx_v_out; goto __pyx_L0; /* "thinc/neural/ops.pyx":377 * return out * * def backprop_softplus(self, dY, X, threshold=20, out=None): # <<<<<<<<<<<<<< * xp = get_array_module(X) * if out is None: */ /* 
function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_softplus", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_xp); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XDECREF(__pyx_v_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":387 * return out * * def mish(self, X, threshold=20., out=None): # <<<<<<<<<<<<<< * Xsoft = self.softplus(X, threshold=threshold, out=out) * Y = self.xp.tanh(Xsoft, out=out) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_69mish(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_69mish = {"mish", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_69mish, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_69mish(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_threshold = 0; PyObject *__pyx_v_out = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mish (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_threshold,&__pyx_n_s_out,0}; PyObject* values[4] = {0,0,0,0}; values[2] = ((PyObject *)((PyObject*)__pyx_float_20_)); values[3] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mish", 0, 2, 4, 1); __PYX_ERR(0, 387, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mish") < 0)) __PYX_ERR(0, 387, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = values[1]; __pyx_v_threshold = values[2]; __pyx_v_out = values[3]; } goto 
__pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("mish", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 387, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.mish", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_68mish(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_threshold, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_68mish(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out) { PyObject *__pyx_v_Xsoft = NULL; PyObject *__pyx_v_Y = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__50) __Pyx_RefNannySetupContext("mish", 0); __Pyx_TraceCall("mish", __pyx_f[0], 387, 0, __PYX_ERR(0, 387, __pyx_L1_error)); /* "thinc/neural/ops.pyx":388 * * def mish(self, X, threshold=20., out=None): * Xsoft = self.softplus(X, threshold=threshold, out=out) # <<<<<<<<<<<<<< * Y = self.xp.tanh(Xsoft, out=out) * Y *= X */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_softplus); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 388, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 388, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_X); __pyx_t_3 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 388, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_threshold, __pyx_v_threshold) < 0) __PYX_ERR(0, 388, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_out, __pyx_v_out) < 0) __PYX_ERR(0, 388, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 388, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_Xsoft = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":389 * def mish(self, X, threshold=20., out=None): * Xsoft = self.softplus(X, threshold=threshold, out=out) * Y = self.xp.tanh(Xsoft, out=out) # <<<<<<<<<<<<<< * Y *= X * return Y */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_tanh); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_v_Xsoft); __Pyx_GIVEREF(__pyx_v_Xsoft); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_Xsoft); __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_out, __pyx_v_out) < 0) __PYX_ERR(0, 389, __pyx_L1_error) __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 389, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_Y = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":390 * Xsoft = self.softplus(X, threshold=threshold, out=out) * Y = self.xp.tanh(Xsoft, out=out) * Y *= X # <<<<<<<<<<<<<< * return Y * */ __pyx_t_1 = PyNumber_InPlaceMultiply(__pyx_v_Y, __pyx_v_X); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 390, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_Y, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":391 * Y = self.xp.tanh(Xsoft, out=out) * Y *= X * return Y # <<<<<<<<<<<<<< * * def backprop_mish(self, dY, X, threshold=20, out=None): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_Y); __pyx_r = __pyx_v_Y; goto __pyx_L0; /* "thinc/neural/ops.pyx":387 * return out * * def mish(self, X, threshold=20., out=None): # <<<<<<<<<<<<<< * Xsoft = self.softplus(X, threshold=threshold, out=out) * Y = self.xp.tanh(Xsoft, out=out) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.Ops.mish", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_Xsoft); __Pyx_XDECREF(__pyx_v_Y); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":393 * return Y * * def backprop_mish(self, dY, X, threshold=20, out=None): # <<<<<<<<<<<<<< * xp = get_array_module(X) * indices = X < threshold */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_71backprop_mish(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_71backprop_mish = {"backprop_mish", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_71backprop_mish, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_71backprop_mish(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_dY = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_threshold = 0; PyObject *__pyx_v_out = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_mish (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_dY,&__pyx_n_s_X,&__pyx_n_s_threshold,&__pyx_n_s_out,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)((PyObject *)__pyx_int_20)); values[4] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, 
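/* Illustrative note (not Cython output): mish(x) = x * tanh(softplus(x)), exactly as in
 * the quoted ops.pyx lines 387-391.  A minimal NumPy sketch reusing the softplus_ref
 * sketch above (names are illustrative, not part of this module; np stands in for xp):
 *
 *     def mish_ref(X, threshold=20.):
 *         Xsoft = softplus_ref(X, threshold)   # log(1 + exp(x)), ~x above the threshold
 *         return X * np.tanh(Xsoft)
 */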
__pyx_n_s_dY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_mish", 0, 3, 5, 1); __PYX_ERR(0, 393, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_mish", 0, 3, 5, 2); __PYX_ERR(0, 393, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_mish") < 0)) __PYX_ERR(0, 393, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_dY = values[1]; __pyx_v_X = values[2]; __pyx_v_threshold = values[3]; __pyx_v_out = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_mish", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 393, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_mish", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_70backprop_mish(__pyx_self, __pyx_v_self, __pyx_v_dY, __pyx_v_X, __pyx_v_threshold, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_70backprop_mish(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_dY, PyObject *__pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out) { PyObject *__pyx_v_xp = NULL; PyObject *__pyx_v_indices = NULL; PyObject *__pyx_v_Xsub = NULL; PyObject *__pyx_v_dYsub = NULL; PyObject *__pyx_v_omega = NULL; PyObject *__pyx_v_delta = NULL; PyObject *__pyx_v_dXsub = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__51) __Pyx_RefNannySetupContext("backprop_mish", 0); __Pyx_TraceCall("backprop_mish", __pyx_f[0], 393, 0, __PYX_ERR(0, 393, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_out); /* "thinc/neural/ops.pyx":394 * * def backprop_mish(self, dY, X, threshold=20, out=None): * xp = get_array_module(X) # <<<<<<<<<<<<<< * indices = X < threshold * Xsub = X[indices] */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_get_array_module); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 394, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, 
function); } } __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_X) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_X); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 394, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_xp = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":395 * def backprop_mish(self, dY, X, threshold=20, out=None): * xp = get_array_module(X) * indices = X < threshold # <<<<<<<<<<<<<< * Xsub = X[indices] * dYsub = dY[indices] */ __pyx_t_1 = PyObject_RichCompare(__pyx_v_X, __pyx_v_threshold, Py_LT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 395, __pyx_L1_error) __pyx_v_indices = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":396 * xp = get_array_module(X) * indices = X < threshold * Xsub = X[indices] # <<<<<<<<<<<<<< * dYsub = dY[indices] * omega = 4. * (Xsub+1.) */ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_X, __pyx_v_indices); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 396, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_Xsub = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":397 * indices = X < threshold * Xsub = X[indices] * dYsub = dY[indices] # <<<<<<<<<<<<<< * omega = 4. * (Xsub+1.) * omega += 4. * xp.exp(2.*Xsub) */ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_dY, __pyx_v_indices); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_dYsub = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":398 * Xsub = X[indices] * dYsub = dY[indices] * omega = 4. * (Xsub+1.) # <<<<<<<<<<<<<< * omega += 4. * xp.exp(2.*Xsub) * omega += xp.exp(Xsub) * ((4.*Xsub)+6.) */ __pyx_t_1 = __Pyx_PyFloat_AddObjC(__pyx_v_Xsub, __pyx_float_1_, 1., 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_Multiply(__pyx_float_4_, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 398, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_omega = __pyx_t_2; __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":399 * dYsub = dY[indices] * omega = 4. * (Xsub+1.) * omega += 4. * xp.exp(2.*Xsub) # <<<<<<<<<<<<<< * omega += xp.exp(Xsub) * ((4.*Xsub)+6.) * delta = 2. * xp.exp(Xsub) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_exp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyNumber_Multiply(__pyx_float_2_, __pyx_v_Xsub); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_2 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_4, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Multiply(__pyx_float_4_, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_omega, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_omega, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":400 * omega = 4. * (Xsub+1.) * omega += 4. * xp.exp(2.*Xsub) * omega += xp.exp(Xsub) * ((4.*Xsub)+6.) # <<<<<<<<<<<<<< * delta = 2. * xp.exp(Xsub) * delta += xp.exp(2.*Xsub) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_exp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_2 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_3, __pyx_v_Xsub) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_Xsub); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Multiply(__pyx_float_4_, __pyx_v_Xsub); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyFloat_AddObjC(__pyx_t_1, __pyx_float_6_, 6., 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Multiply(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_omega, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 400, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_omega, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":401 * omega += 4. * xp.exp(2.*Xsub) * omega += xp.exp(Xsub) * ((4.*Xsub)+6.) * delta = 2. * xp.exp(Xsub) # <<<<<<<<<<<<<< * delta += xp.exp(2.*Xsub) * delta += 2. */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_exp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_3 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_2, __pyx_v_Xsub) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_Xsub); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Multiply(__pyx_float_2_, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_delta = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":402 * omega += xp.exp(Xsub) * ((4.*Xsub)+6.) * delta = 2. * xp.exp(Xsub) * delta += xp.exp(2.*Xsub) # <<<<<<<<<<<<<< * delta += 2. * dXsub = dYsub * ((xp.exp(Xsub) * omega) / (delta**2)) */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_exp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 402, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyNumber_Multiply(__pyx_float_2_, __pyx_v_Xsub); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 402, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 402, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_delta, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 402, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_delta, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":403 * delta = 2. * xp.exp(Xsub) * delta += xp.exp(2.*Xsub) * delta += 2. # <<<<<<<<<<<<<< * dXsub = dYsub * ((xp.exp(Xsub) * omega) / (delta**2)) * if out is None: */ __pyx_t_3 = __Pyx_PyFloat_AddObjC(__pyx_v_delta, __pyx_float_2_, 2., 1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF_SET(__pyx_v_delta, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":404 * delta += xp.exp(2.*Xsub) * delta += 2. * dXsub = dYsub * ((xp.exp(Xsub) * omega) / (delta**2)) # <<<<<<<<<<<<<< * if out is None: * out = xp.zeros(dY.shape, dtype="f") */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_exp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 404, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_3 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_2, __pyx_v_Xsub) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_Xsub); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 404, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Multiply(__pyx_t_3, __pyx_v_omega); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 404, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Power(__pyx_v_delta, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 404, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyNumber_Divide(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 404, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_v_dYsub, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 404, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_dXsub = __pyx_t_3; __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":405 * delta += 2. * dXsub = dYsub * ((xp.exp(Xsub) * omega) / (delta**2)) * if out is None: # <<<<<<<<<<<<<< * out = xp.zeros(dY.shape, dtype="f") * # Gradient when above threshold will ignore softplus. */ __pyx_t_5 = (__pyx_v_out == Py_None); __pyx_t_6 = (__pyx_t_5 != 0); if (__pyx_t_6) { /* "thinc/neural/ops.pyx":406 * dXsub = dYsub * ((xp.exp(Xsub) * omega) / (delta**2)) * if out is None: * out = xp.zeros(dY.shape, dtype="f") # <<<<<<<<<<<<<< * # Gradient when above threshold will ignore softplus. * out[:] = dY + dY * self.dtanh(X) */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 406, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_dY, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 406, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 406, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 406, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_n_s_f) < 0) __PYX_ERR(0, 406, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 406, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_out, __pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":405 * delta += 2. * dXsub = dYsub * ((xp.exp(Xsub) * omega) / (delta**2)) * if out is None: # <<<<<<<<<<<<<< * out = xp.zeros(dY.shape, dtype="f") * # Gradient when above threshold will ignore softplus. */ } /* "thinc/neural/ops.pyx":408 * out = xp.zeros(dY.shape, dtype="f") * # Gradient when above threshold will ignore softplus. 
* out[:] = dY + dY * self.dtanh(X) # <<<<<<<<<<<<<< * out[indices] = dXsub * return out */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dtanh); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_4 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_1, __pyx_v_X) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_X); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Multiply(__pyx_v_dY, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyNumber_Add(__pyx_v_dY, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__Pyx_PyObject_SetSlice(__pyx_v_out, __pyx_t_4, 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 408, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":409 * # Gradient when above threshold will ignore softplus. * out[:] = dY + dY * self.dtanh(X) * out[indices] = dXsub # <<<<<<<<<<<<<< * return out * */ if (unlikely(PyObject_SetItem(__pyx_v_out, __pyx_v_indices, __pyx_v_dXsub) < 0)) __PYX_ERR(0, 409, __pyx_L1_error) /* "thinc/neural/ops.pyx":410 * out[:] = dY + dY * self.dtanh(X) * out[indices] = dXsub * return out # <<<<<<<<<<<<<< * * def xavier_uniform_init(self, W, inplace=True): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_out); __pyx_r = __pyx_v_out; goto __pyx_L0; /* "thinc/neural/ops.pyx":393 * return Y * * def backprop_mish(self, dY, X, threshold=20, out=None): # <<<<<<<<<<<<<< * xp = get_array_module(X) * indices = X < threshold */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.Ops.backprop_mish", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_xp); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XDECREF(__pyx_v_Xsub); __Pyx_XDECREF(__pyx_v_dYsub); __Pyx_XDECREF(__pyx_v_omega); __Pyx_XDECREF(__pyx_v_delta); __Pyx_XDECREF(__pyx_v_dXsub); __Pyx_XDECREF(__pyx_v_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":412 * return out * * def xavier_uniform_init(self, W, inplace=True): # <<<<<<<<<<<<<< * if (W**2).sum() != 0.: * return W */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_73xavier_uniform_init(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_73xavier_uniform_init = {"xavier_uniform_init", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_73xavier_uniform_init, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_73xavier_uniform_init(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_W = 0; PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = 
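/* Illustrative note (not Cython output): the backprop_mish code above implements the
 * closed-form mish derivative quoted from ops.pyx lines 394-410, applied only where
 * X < threshold; above the threshold the quoted source ignores the softplus term and
 * falls back to dY + dY * self.dtanh(X).  A minimal NumPy sketch of the below-threshold
 * branch (np standing in for xp, name illustrative):
 *
 *     def backprop_mish_below_threshold(dY, X):
 *         omega = 4. * (X + 1.) + 4. * np.exp(2. * X) + np.exp(X) * (4. * X + 6.)
 *         delta = 2. * np.exp(X) + np.exp(2. * X) + 2.
 *         return dY * (np.exp(X) * omega) / (delta ** 2)
 */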
NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("xavier_uniform_init (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_W,&__pyx_n_s_inplace,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_True)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_W)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("xavier_uniform_init", 0, 2, 3, 1); __PYX_ERR(0, 412, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "xavier_uniform_init") < 0)) __PYX_ERR(0, 412, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_W = values[1]; __pyx_v_inplace = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("xavier_uniform_init", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 412, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.xavier_uniform_init", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_72xavier_uniform_init(__pyx_self, __pyx_v_self, __pyx_v_W, __pyx_v_inplace); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_72xavier_uniform_init(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_W, PyObject *__pyx_v_inplace) { PyObject *__pyx_v_scale = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__52) __Pyx_RefNannySetupContext("xavier_uniform_init", 0); __Pyx_TraceCall("xavier_uniform_init", __pyx_f[0], 412, 0, __PYX_ERR(0, 412, __pyx_L1_error)); /* "thinc/neural/ops.pyx":413 * * def xavier_uniform_init(self, W, inplace=True): * if (W**2).sum() != 0.: # <<<<<<<<<<<<<< * return W * scale = self.xp.sqrt(6. 
/ (W.shape[0] + W.shape[1])) */ __pyx_t_2 = PyNumber_Power(__pyx_v_W, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_sum); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2) : __Pyx_PyObject_CallNoArg(__pyx_t_3); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyFloat_NeObjC(__pyx_t_1, __pyx_float_0_, 0., 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 413, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { /* "thinc/neural/ops.pyx":414 * def xavier_uniform_init(self, W, inplace=True): * if (W**2).sum() != 0.: * return W # <<<<<<<<<<<<<< * scale = self.xp.sqrt(6. / (W.shape[0] + W.shape[1])) * if inplace: */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_W); __pyx_r = __pyx_v_W; goto __pyx_L0; /* "thinc/neural/ops.pyx":413 * * def xavier_uniform_init(self, W, inplace=True): * if (W**2).sum() != 0.: # <<<<<<<<<<<<<< * return W * scale = self.xp.sqrt(6. / (W.shape[0] + W.shape[1])) */ } /* "thinc/neural/ops.pyx":415 * if (W**2).sum() != 0.: * return W * scale = self.xp.sqrt(6. 
/ (W.shape[0] + W.shape[1])) # <<<<<<<<<<<<<< * if inplace: * copy_array(W, self.xp.random.uniform(-scale, scale, W.shape)) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_W, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_W, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_1, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Add(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyFloat_DivideCObj(__pyx_float_6_, __pyx_t_1, 6., 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_1, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_6); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_scale = __pyx_t_3; __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":416 * return W * scale = self.xp.sqrt(6. / (W.shape[0] + W.shape[1])) * if inplace: # <<<<<<<<<<<<<< * copy_array(W, self.xp.random.uniform(-scale, scale, W.shape)) * return W */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 416, __pyx_L1_error) if (__pyx_t_4) { /* "thinc/neural/ops.pyx":417 * scale = self.xp.sqrt(6. 
/ (W.shape[0] + W.shape[1])) * if inplace: * copy_array(W, self.xp.random.uniform(-scale, scale, W.shape)) # <<<<<<<<<<<<<< * return W * else: */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_copy_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_random); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_uniform); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyNumber_Negative(__pyx_v_scale); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_W, __pyx_n_s_shape); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = NULL; __pyx_t_9 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); __pyx_t_9 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[4] = {__pyx_t_8, __pyx_t_5, __pyx_v_scale, __pyx_t_7}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_9, 3+__pyx_t_9); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[4] = {__pyx_t_8, __pyx_t_5, __pyx_v_scale, __pyx_t_7}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_9, 3+__pyx_t_9); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } else #endif { __pyx_t_10 = PyTuple_New(3+__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (__pyx_t_8) { __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_8); __pyx_t_8 = NULL; } __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0+__pyx_t_9, __pyx_t_5); __Pyx_INCREF(__pyx_v_scale); __Pyx_GIVEREF(__pyx_v_scale); PyTuple_SET_ITEM(__pyx_t_10, 1+__pyx_t_9, __pyx_v_scale); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_10, 2+__pyx_t_9, __pyx_t_7); __pyx_t_5 = 0; __pyx_t_7 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; __pyx_t_9 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_9 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_1, __pyx_v_W, 
__pyx_t_6}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_1, __pyx_v_W, __pyx_t_6}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_10 = PyTuple_New(2+__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (__pyx_t_1) { __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_1); __pyx_t_1 = NULL; } __Pyx_INCREF(__pyx_v_W); __Pyx_GIVEREF(__pyx_v_W); PyTuple_SET_ITEM(__pyx_t_10, 0+__pyx_t_9, __pyx_v_W); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1+__pyx_t_9, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":418 * if inplace: * copy_array(W, self.xp.random.uniform(-scale, scale, W.shape)) * return W # <<<<<<<<<<<<<< * else: * return self.xp.random.uniform(-scale, scale, W.shape) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_W); __pyx_r = __pyx_v_W; goto __pyx_L0; /* "thinc/neural/ops.pyx":416 * return W * scale = self.xp.sqrt(6. / (W.shape[0] + W.shape[1])) * if inplace: # <<<<<<<<<<<<<< * copy_array(W, self.xp.random.uniform(-scale, scale, W.shape)) * return W */ } /* "thinc/neural/ops.pyx":420 * return W * else: * return self.xp.random.uniform(-scale, scale, W.shape) # <<<<<<<<<<<<<< * * def normal_init(self, W, fan_in, inplace=True): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_random); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_uniform); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyNumber_Negative(__pyx_v_scale); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_W, __pyx_n_s_shape); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = NULL; __pyx_t_9 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_9 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[4] = {__pyx_t_1, __pyx_t_10, __pyx_v_scale, __pyx_t_6}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_9, 3+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 420, __pyx_L1_error) 
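/* The generated C in this region implements Ops.xavier_uniform_init from
 * thinc/neural/ops.pyx. A minimal Python sketch of the same logic, assuming a
 * NumPy-like `xp` module (the in-place branch stands in for thinc's
 * `copy_array` helper):
 *
 *     import numpy as np
 *
 *     def xavier_uniform_init(W, inplace=True):
 *         if (W ** 2).sum() != 0.:          # already initialised: leave as-is
 *             return W
 *         scale = np.sqrt(6. / (W.shape[0] + W.shape[1]))
 *         if inplace:
 *             W[...] = np.random.uniform(-scale, scale, W.shape)
 *             return W
 *         return np.random.uniform(-scale, scale, W.shape)
 */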
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[4] = {__pyx_t_1, __pyx_t_10, __pyx_v_scale, __pyx_t_6}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_9, 3+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 420, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_7 = PyTuple_New(3+__pyx_t_9); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_1) { __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_1); __pyx_t_1 = NULL; } __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_9, __pyx_t_10); __Pyx_INCREF(__pyx_v_scale); __Pyx_GIVEREF(__pyx_v_scale); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_9, __pyx_v_scale); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 2+__pyx_t_9, __pyx_t_6); __pyx_t_10 = 0; __pyx_t_6 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "thinc/neural/ops.pyx":412 * return out * * def xavier_uniform_init(self, W, inplace=True): # <<<<<<<<<<<<<< * if (W**2).sum() != 0.: * return W */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("thinc.neural.ops.Ops.xavier_uniform_init", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_scale); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":422 * return self.xp.random.uniform(-scale, scale, W.shape) * * def normal_init(self, W, fan_in, inplace=True): # <<<<<<<<<<<<<< * if (W**2).sum() != 0.: * return W */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_75normal_init(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_75normal_init = {"normal_init", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_75normal_init, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_75normal_init(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_W = 0; PyObject *__pyx_v_fan_in = 0; PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("normal_init (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_W,&__pyx_n_s_fan_in,&__pyx_n_s_inplace,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_True)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = 
PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_W)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("normal_init", 0, 3, 4, 1); __PYX_ERR(0, 422, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_fan_in)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("normal_init", 0, 3, 4, 2); __PYX_ERR(0, 422, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "normal_init") < 0)) __PYX_ERR(0, 422, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_W = values[1]; __pyx_v_fan_in = values[2]; __pyx_v_inplace = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("normal_init", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 422, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.normal_init", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_74normal_init(__pyx_self, __pyx_v_self, __pyx_v_W, __pyx_v_fan_in, __pyx_v_inplace); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_74normal_init(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_W, PyObject *__pyx_v_fan_in, PyObject *__pyx_v_inplace) { PyObject *__pyx_v_scale = NULL; PyObject *__pyx_v_inits = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__53) __Pyx_RefNannySetupContext("normal_init", 0); __Pyx_TraceCall("normal_init", __pyx_f[0], 422, 0, __PYX_ERR(0, 422, __pyx_L1_error)); /* "thinc/neural/ops.pyx":423 * * def normal_init(self, W, fan_in, inplace=True): * if (W**2).sum() != 0.: # <<<<<<<<<<<<<< * return W * scale = self.xp.sqrt(1. 
/ fan_in) */ __pyx_t_2 = PyNumber_Power(__pyx_v_W, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_sum); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2) : __Pyx_PyObject_CallNoArg(__pyx_t_3); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyFloat_NeObjC(__pyx_t_1, __pyx_float_0_, 0., 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 423, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { /* "thinc/neural/ops.pyx":424 * def normal_init(self, W, fan_in, inplace=True): * if (W**2).sum() != 0.: * return W # <<<<<<<<<<<<<< * scale = self.xp.sqrt(1. / fan_in) * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_W); __pyx_r = __pyx_v_W; goto __pyx_L0; /* "thinc/neural/ops.pyx":423 * * def normal_init(self, W, fan_in, inplace=True): * if (W**2).sum() != 0.: # <<<<<<<<<<<<<< * return W * scale = self.xp.sqrt(1. / fan_in) */ } /* "thinc/neural/ops.pyx":425 * if (W**2).sum() != 0.: * return W * scale = self.xp.sqrt(1. / fan_in) # <<<<<<<<<<<<<< * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) * inits = inits.reshape(W.shape) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyFloat_DivideCObj(__pyx_float_1_, __pyx_v_fan_in, 1., 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_scale = __pyx_t_3; __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":426 * return W * scale = self.xp.sqrt(1. 
/ fan_in) * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) # <<<<<<<<<<<<<< * inits = inits.reshape(W.shape) * if inplace: */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 426, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 426, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_normal); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 426, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 426, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_scale, __pyx_v_scale) < 0) __PYX_ERR(0, 426, __pyx_L1_error) __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_prod); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 426, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_W, __pyx_n_s_shape); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 426, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_1 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_7, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 426, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 426, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_size, __pyx_t_5) < 0) __PYX_ERR(0, 426, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 426, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_inits = __pyx_t_5; __pyx_t_5 = 0; /* "thinc/neural/ops.pyx":427 * scale = self.xp.sqrt(1. / fan_in) * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) * inits = inits.reshape(W.shape) # <<<<<<<<<<<<<< * if inplace: * copy_array(W, inits) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_inits, __pyx_n_s_reshape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_W, __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_5 = (__pyx_t_1) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_1, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_inits, __pyx_t_5); __pyx_t_5 = 0; /* "thinc/neural/ops.pyx":428 * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) * inits = inits.reshape(W.shape) * if inplace: # <<<<<<<<<<<<<< * copy_array(W, inits) * return W */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 428, __pyx_L1_error) if (__pyx_t_4) { /* "thinc/neural/ops.pyx":429 * inits = inits.reshape(W.shape) * if inplace: * copy_array(W, inits) # <<<<<<<<<<<<<< * return W * else: */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_copy_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_W, __pyx_v_inits}; __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 429, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_5); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_W, __pyx_v_inits}; __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 429, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_5); } else #endif { __pyx_t_1 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __pyx_t_3 = NULL; } __Pyx_INCREF(__pyx_v_W); __Pyx_GIVEREF(__pyx_v_W); PyTuple_SET_ITEM(__pyx_t_1, 0+__pyx_t_8, __pyx_v_W); __Pyx_INCREF(__pyx_v_inits); __Pyx_GIVEREF(__pyx_v_inits); PyTuple_SET_ITEM(__pyx_t_1, 1+__pyx_t_8, __pyx_v_inits); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "thinc/neural/ops.pyx":430 * if inplace: * copy_array(W, inits) * return W # <<<<<<<<<<<<<< * else: * return inits */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_W); __pyx_r = __pyx_v_W; goto __pyx_L0; /* "thinc/neural/ops.pyx":428 * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) * inits = inits.reshape(W.shape) * if inplace: # <<<<<<<<<<<<<< * copy_array(W, inits) * return W */ } /* "thinc/neural/ops.pyx":432 * return W * else: * return inits # <<<<<<<<<<<<<< * * def he_normal_init(self, shape, fan_in): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_inits); __pyx_r = __pyx_v_inits; goto __pyx_L0; } /* "thinc/neural/ops.pyx":422 * return self.xp.random.uniform(-scale, scale, W.shape) * * def normal_init(self, W, fan_in, inplace=True): # <<<<<<<<<<<<<< * 
if (W**2).sum() != 0.: * return W */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.Ops.normal_init", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_scale); __Pyx_XDECREF(__pyx_v_inits); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":434 * return inits * * def he_normal_init(self, shape, fan_in): # <<<<<<<<<<<<<< * scale = self.xp.sqrt(2. / fan_in) * return self.xp.random.normal(scale=scale, size=prod(shape)).reshape(shape) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_77he_normal_init(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_77he_normal_init = {"he_normal_init", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_77he_normal_init, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_77he_normal_init(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_shape = 0; PyObject *__pyx_v_fan_in = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("he_normal_init (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_shape,&__pyx_n_s_fan_in,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("he_normal_init", 1, 3, 3, 1); __PYX_ERR(0, 434, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_fan_in)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("he_normal_init", 1, 3, 3, 2); __PYX_ERR(0, 434, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "he_normal_init") < 0)) __PYX_ERR(0, 434, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_shape = values[1]; __pyx_v_fan_in = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("he_normal_init", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 434, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.he_normal_init", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); 
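/* The generated C here corresponds to Ops.normal_init (above) and
 * Ops.he_normal_init (this wrapper) from thinc/neural/ops.pyx. A rough Python
 * sketch of both, assuming a NumPy-like `xp` and that `prod` multiplies the
 * entries of a shape tuple:
 *
 *     import numpy as np
 *
 *     def normal_init(W, fan_in, inplace=True):
 *         if (W ** 2).sum() != 0.:
 *             return W
 *         scale = np.sqrt(1. / fan_in)
 *         inits = np.random.normal(scale=scale, size=int(np.prod(W.shape)))
 *         inits = inits.reshape(W.shape)
 *         if inplace:
 *             W[...] = inits
 *             return W
 *         return inits
 *
 *     def he_normal_init(shape, fan_in):
 *         scale = np.sqrt(2. / fan_in)
 *         return np.random.normal(scale=scale, size=np.prod(shape)).reshape(shape)
 */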
return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_76he_normal_init(__pyx_self, __pyx_v_self, __pyx_v_shape, __pyx_v_fan_in); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_76he_normal_init(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_shape, PyObject *__pyx_v_fan_in) { PyObject *__pyx_v_scale = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__54) __Pyx_RefNannySetupContext("he_normal_init", 0); __Pyx_TraceCall("he_normal_init", __pyx_f[0], 434, 0, __PYX_ERR(0, 434, __pyx_L1_error)); /* "thinc/neural/ops.pyx":435 * * def he_normal_init(self, shape, fan_in): * scale = self.xp.sqrt(2. / fan_in) # <<<<<<<<<<<<<< * return self.xp.random.normal(scale=scale, size=prod(shape)).reshape(shape) * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyFloat_DivideCObj(__pyx_float_2_, __pyx_v_fan_in, 2., 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_scale = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":436 * def he_normal_init(self, shape, fan_in): * scale = self.xp.sqrt(2. 
/ fan_in) * return self.xp.random.normal(scale=scale, size=prod(shape)).reshape(shape) # <<<<<<<<<<<<<< * * def update_averages(self, ema, weights, t, max_decay=0.9999): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_normal); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_scale, __pyx_v_scale) < 0) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_prod); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_4 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_v_shape) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_shape); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_size, __pyx_t_4) < 0) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_reshape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v_shape) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_shape); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":434 * return inits * * def he_normal_init(self, shape, fan_in): # <<<<<<<<<<<<<< * scale = self.xp.sqrt(2. 
/ fan_in) * return self.xp.random.normal(scale=scale, size=prod(shape)).reshape(shape) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.Ops.he_normal_init", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_scale); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":438 * return self.xp.random.normal(scale=scale, size=prod(shape)).reshape(shape) * * def update_averages(self, ema, weights, t, max_decay=0.9999): # <<<<<<<<<<<<<< * cdef weight_t decay = (1.0 + t) / (10.0 + t) * if decay > max_decay: */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_79update_averages(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_79update_averages = {"update_averages", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_79update_averages, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_79update_averages(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_ema = 0; PyObject *__pyx_v_weights = 0; PyObject *__pyx_v_t = 0; PyObject *__pyx_v_max_decay = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("update_averages (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_ema,&__pyx_n_s_weights,&__pyx_n_s_t,&__pyx_n_s_max_decay,0}; PyObject* values[5] = {0,0,0,0,0}; values[4] = ((PyObject *)((PyObject*)__pyx_float_0_9999)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_ema)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("update_averages", 0, 4, 5, 1); __PYX_ERR(0, 438, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("update_averages", 0, 4, 5, 2); __PYX_ERR(0, 438, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("update_averages", 0, 4, 5, 3); __PYX_ERR(0, 438, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_decay); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "update_averages") < 0)) __PYX_ERR(0, 438, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_ema = values[1]; __pyx_v_weights = values[2]; __pyx_v_t = values[3]; __pyx_v_max_decay = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("update_averages", 0, 4, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 438, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.update_averages", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_78update_averages(__pyx_self, __pyx_v_self, __pyx_v_ema, __pyx_v_weights, __pyx_v_t, __pyx_v_max_decay); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_78update_averages(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_ema, PyObject *__pyx_v_weights, PyObject *__pyx_v_t, PyObject *__pyx_v_max_decay) { __pyx_t_5thinc_8typedefs_weight_t __pyx_v_decay; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __pyx_t_5thinc_8typedefs_weight_t __pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__55) __Pyx_RefNannySetupContext("update_averages", 0); __Pyx_TraceCall("update_averages", __pyx_f[0], 438, 0, __PYX_ERR(0, 438, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_ema); /* "thinc/neural/ops.pyx":439 * * def update_averages(self, ema, weights, t, max_decay=0.9999): * cdef weight_t decay = (1.0 + t) / (10.0 + t) # <<<<<<<<<<<<<< * if decay > max_decay: * decay = max_decay */ __pyx_t_1 = __Pyx_PyFloat_AddCObj(__pyx_float_1_0, __pyx_v_t, 1.0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 439, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyFloat_AddCObj(__pyx_float_10_0, __pyx_v_t, 10.0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 439, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 439, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_4 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 439, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_decay = __pyx_t_4; /* "thinc/neural/ops.pyx":440 * def update_averages(self, ema, weights, t, max_decay=0.9999): * cdef weight_t decay = (1.0 + t) / (10.0 + t) * if decay > max_decay: # <<<<<<<<<<<<<< * decay = max_decay * ema -= (1-decay) * (ema - weights) */ __pyx_t_3 = PyFloat_FromDouble(__pyx_v_decay); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 440, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, __pyx_v_max_decay, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 440, __pyx_L1_error) 
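/* The code above and below implements Ops.update_averages: an exponential moving
 * average of the weights whose decay warms up towards (at most) max_decay. A
 * compact sketch of the arithmetic, assuming array-like `ema` and `weights`:
 *
 *     def update_averages(ema, weights, t, max_decay=0.9999):
 *         decay = (1.0 + t) / (10.0 + t)        # approaches 1.0 as t grows
 *         if decay > max_decay:
 *             decay = max_decay
 *         ema -= (1 - decay) * (ema - weights)  # in-place EMA update
 */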
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 440, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_5) { /* "thinc/neural/ops.pyx":441 * cdef weight_t decay = (1.0 + t) / (10.0 + t) * if decay > max_decay: * decay = max_decay # <<<<<<<<<<<<<< * ema -= (1-decay) * (ema - weights) * */ __pyx_t_4 = __pyx_PyFloat_AsFloat(__pyx_v_max_decay); if (unlikely((__pyx_t_4 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 441, __pyx_L1_error) __pyx_v_decay = __pyx_t_4; /* "thinc/neural/ops.pyx":440 * def update_averages(self, ema, weights, t, max_decay=0.9999): * cdef weight_t decay = (1.0 + t) / (10.0 + t) * if decay > max_decay: # <<<<<<<<<<<<<< * decay = max_decay * ema -= (1-decay) * (ema - weights) */ } /* "thinc/neural/ops.pyx":442 * if decay > max_decay: * decay = max_decay * ema -= (1-decay) * (ema - weights) # <<<<<<<<<<<<<< * * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, */ __pyx_t_2 = PyFloat_FromDouble((1.0 - __pyx_v_decay)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 442, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Subtract(__pyx_v_ema, __pyx_v_weights); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 442, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyNumber_Multiply(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 442, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_InPlaceSubtract(__pyx_v_ema, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 442, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_ema, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":438 * return self.xp.random.normal(scale=scale, size=prod(shape)).reshape(shape) * * def update_averages(self, ema, weights, t, max_decay=0.9999): # <<<<<<<<<<<<<< * cdef weight_t decay = (1.0 + t) / (10.0 + t) * if decay > max_decay: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("thinc.neural.ops.Ops.update_averages", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_ema); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":444 * ema -= (1-decay) * (ema - weights) * * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, # <<<<<<<<<<<<<< * learn_rate, mod_rate=1.): * mom1 *= beta1 */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_81adam(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_81adam = {"adam", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_81adam, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_81adam(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_weights = 0; PyObject *__pyx_v_gradient = 0; PyObject *__pyx_v_mom1 = 0; PyObject *__pyx_v_mom2 = 0; PyObject *__pyx_v_beta1 = 0; PyObject *__pyx_v_beta2 = 0; PyObject *__pyx_v_eps = 0; PyObject *__pyx_v_learn_rate = 0; PyObject *__pyx_v_mod_rate = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; 
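/* The wrapper and body that follow implement Ops.adam. The update rule, written
 * as a Python sketch over NumPy-like arrays (per the comment in ops.pyx, any
 * bias correction is assumed to be folded into `learn_rate` by the caller):
 *
 *     import numpy as np
 *
 *     def adam(weights, gradient, mom1, mom2, beta1, beta2, eps,
 *              learn_rate, mod_rate=1.):
 *         mom1 *= beta1
 *         mom2 *= beta2
 *         mom1 += gradient * (1. - beta1)
 *         mom2 += gradient * gradient * (1. - beta2)
 *         weights -= learn_rate * (mom1 / (mod_rate * np.sqrt(mom2) + eps))
 *         gradient.fill(0)
 */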
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("adam (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_weights,&__pyx_n_s_gradient,&__pyx_n_s_mom1,&__pyx_n_s_mom2,&__pyx_n_s_beta1,&__pyx_n_s_beta2,&__pyx_n_s_eps,&__pyx_n_s_learn_rate,&__pyx_n_s_mod_rate,0}; PyObject* values[10] = {0,0,0,0,0,0,0,0,0,0}; values[9] = ((PyObject *)((PyObject*)__pyx_float_1_)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); CYTHON_FALLTHROUGH; case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); CYTHON_FALLTHROUGH; case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 1); __PYX_ERR(0, 444, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 2); __PYX_ERR(0, 444, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mom1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 3); __PYX_ERR(0, 444, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mom2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 4); __PYX_ERR(0, 444, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_beta1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 5); __PYX_ERR(0, 444, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 6: if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_beta2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 6); __PYX_ERR(0, 444, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 7: if (likely((values[7] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_eps)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 7); __PYX_ERR(0, 444, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 8: if (likely((values[8] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_learn_rate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 8); __PYX_ERR(0, 444, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 9: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mod_rate); if (value) { values[9] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "adam") < 0)) 
__PYX_ERR(0, 444, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); CYTHON_FALLTHROUGH; case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_weights = values[1]; __pyx_v_gradient = values[2]; __pyx_v_mom1 = values[3]; __pyx_v_mom2 = values[4]; __pyx_v_beta1 = values[5]; __pyx_v_beta2 = values[6]; __pyx_v_eps = values[7]; __pyx_v_learn_rate = values[8]; __pyx_v_mod_rate = values[9]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 444, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.adam", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_80adam(__pyx_self, __pyx_v_self, __pyx_v_weights, __pyx_v_gradient, __pyx_v_mom1, __pyx_v_mom2, __pyx_v_beta1, __pyx_v_beta2, __pyx_v_eps, __pyx_v_learn_rate, __pyx_v_mod_rate); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_80adam(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_weights, PyObject *__pyx_v_gradient, PyObject *__pyx_v_mom1, PyObject *__pyx_v_mom2, PyObject *__pyx_v_beta1, PyObject *__pyx_v_beta2, PyObject *__pyx_v_eps, PyObject *__pyx_v_learn_rate, PyObject *__pyx_v_mod_rate) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__56) __Pyx_RefNannySetupContext("adam", 0); __Pyx_TraceCall("adam", __pyx_f[0], 444, 0, __PYX_ERR(0, 444, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_weights); __Pyx_INCREF(__pyx_v_mom1); __Pyx_INCREF(__pyx_v_mom2); /* "thinc/neural/ops.pyx":446 * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, * learn_rate, mod_rate=1.): * mom1 *= beta1 # <<<<<<<<<<<<<< * mom2 *= beta2 * mom1 += gradient * (1.-beta1) */ __pyx_t_1 = PyNumber_InPlaceMultiply(__pyx_v_mom1, __pyx_v_beta1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 446, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_mom1, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":447 * learn_rate, mod_rate=1.): * mom1 *= beta1 * mom2 *= beta2 # <<<<<<<<<<<<<< * mom1 += gradient * (1.-beta1) * mom2 += gradient * gradient * (1.-beta2) */ __pyx_t_1 = PyNumber_InPlaceMultiply(__pyx_v_mom2, __pyx_v_beta2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_mom2, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":448 * mom1 *= beta1 * mom2 *= beta2 * mom1 += gradient * (1.-beta1) # <<<<<<<<<<<<<< * mom2 += gradient * gradient * (1.-beta2) * # Here we assume learn rate is calculated by the caller. 
*/ __pyx_t_1 = __Pyx_PyFloat_SubtractCObj(__pyx_float_1_, __pyx_v_beta1, 1., 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 448, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_Multiply(__pyx_v_gradient, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 448, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_InPlaceAdd(__pyx_v_mom1, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 448, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_mom1, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":449 * mom2 *= beta2 * mom1 += gradient * (1.-beta1) * mom2 += gradient * gradient * (1.-beta2) # <<<<<<<<<<<<<< * # Here we assume learn rate is calculated by the caller. * # cdef weight_t a_t = learn_rate * sqrt(1-beta2**hp.t) / (1-beta1**hp.t); */ __pyx_t_1 = PyNumber_Multiply(__pyx_v_gradient, __pyx_v_gradient); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 449, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyFloat_SubtractCObj(__pyx_float_1_, __pyx_v_beta2, 1., 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 449, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 449, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_mom2, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 449, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_mom2, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":452 * # Here we assume learn rate is calculated by the caller. * # cdef weight_t a_t = learn_rate * sqrt(1-beta2**hp.t) / (1-beta1**hp.t); * weights -= learn_rate * (mom1 / (mod_rate * self.xp.sqrt(mom2) + eps)) # <<<<<<<<<<<<<< * gradient.fill(0) * */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 452, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 452, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_2 = (__pyx_t_3) ? 
__Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_3, __pyx_v_mom2) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_mom2); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 452, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Multiply(__pyx_v_mod_rate, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 452, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_v_eps); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 452, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyNumber_Divide(__pyx_v_mom1, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 452, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Multiply(__pyx_v_learn_rate, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 452, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_InPlaceSubtract(__pyx_v_weights, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 452, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_weights, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":453 * # cdef weight_t a_t = learn_rate * sqrt(1-beta2**hp.t) / (1-beta1**hp.t); * weights -= learn_rate * (mom1 / (mod_rate * self.xp.sqrt(mom2) + eps)) * gradient.fill(0) # <<<<<<<<<<<<<< * * def clip_gradient(self, gradient, threshold): */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_gradient, __pyx_n_s_fill); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 453, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_int_0) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_int_0); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 453, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":444 * ema -= (1-decay) * (ema - weights) * * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, # <<<<<<<<<<<<<< * learn_rate, mod_rate=1.): * mom1 *= beta1 */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("thinc.neural.ops.Ops.adam", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_weights); __Pyx_XDECREF(__pyx_v_mom1); __Pyx_XDECREF(__pyx_v_mom2); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":455 * gradient.fill(0) * * def clip_gradient(self, gradient, threshold): # <<<<<<<<<<<<<< * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_83clip_gradient(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_83clip_gradient = {"clip_gradient", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_83clip_gradient, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_83clip_gradient(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_gradient = 0; PyObject *__pyx_v_threshold = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("clip_gradient (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_gradient,&__pyx_n_s_threshold,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("clip_gradient", 1, 3, 3, 1); __PYX_ERR(0, 455, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("clip_gradient", 1, 3, 3, 2); __PYX_ERR(0, 455, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "clip_gradient") < 0)) __PYX_ERR(0, 455, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_gradient = values[1]; __pyx_v_threshold = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("clip_gradient", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 455, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.clip_gradient", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_82clip_gradient(__pyx_self, __pyx_v_self, __pyx_v_gradient, __pyx_v_threshold); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_82clip_gradient(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_gradient, PyObject *__pyx_v_threshold) { PyObject *__pyx_v_xp = NULL; PyObject *__pyx_v_grad_norm = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__57) __Pyx_RefNannySetupContext("clip_gradient", 0); __Pyx_TraceCall("clip_gradient", __pyx_f[0], 455, 0, __PYX_ERR(0, 455, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_gradient); /* "thinc/neural/ops.pyx":456 * * def clip_gradient(self, gradient, threshold): * xp = get_array_module(gradient) # <<<<<<<<<<<<<< * grad_norm = xp.linalg.norm(gradient) * if grad_norm >= threshold: */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_get_array_module); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 456, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_gradient) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_gradient); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 456, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_xp = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":457 * def clip_gradient(self, gradient, threshold): * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) # <<<<<<<<<<<<<< * if grad_norm >= threshold: * gradient *= threshold / grad_norm */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_linalg); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 457, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_norm); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 457, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_gradient) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_gradient); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 457, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_grad_norm = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":458 * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) * if grad_norm >= threshold: # <<<<<<<<<<<<<< * gradient *= threshold / grad_norm * */ __pyx_t_1 = PyObject_RichCompare(__pyx_v_grad_norm, __pyx_v_threshold, Py_GE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 458, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 458, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_4) { /* "thinc/neural/ops.pyx":459 * grad_norm = xp.linalg.norm(gradient) * if grad_norm >= threshold: * gradient *= threshold / grad_norm # <<<<<<<<<<<<<< * * def logloss(self, y_true, y_pred): */ __pyx_t_1 = __Pyx_PyNumber_Divide(__pyx_v_threshold, __pyx_v_grad_norm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 459, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_v_gradient, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 459, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_gradient, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":458 * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) * if grad_norm >= threshold: # <<<<<<<<<<<<<< * gradient *= threshold / grad_norm * */ } /* "thinc/neural/ops.pyx":455 * gradient.fill(0) * * def clip_gradient(self, gradient, threshold): # <<<<<<<<<<<<<< * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("thinc.neural.ops.Ops.clip_gradient", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_xp); __Pyx_XDECREF(__pyx_v_grad_norm); __Pyx_XDECREF(__pyx_v_gradient); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":461 * gradient *= threshold / grad_norm * * def logloss(self, y_true, y_pred): # <<<<<<<<<<<<<< * log_yp = self.xp.log(y_pred + 1e-8) * loss = (y_true * log_yp) + (1-y_true) * self.xp.log((1-y_pred)+1e-8) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_85logloss(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3Ops_85logloss = {"logloss", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3Ops_85logloss, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3Ops_85logloss(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_y_true = 0; PyObject *__pyx_v_y_pred = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("logloss (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_y_true,&__pyx_n_s_y_pred,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = 
PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y_true)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("logloss", 1, 3, 3, 1); __PYX_ERR(0, 461, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y_pred)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("logloss", 1, 3, 3, 2); __PYX_ERR(0, 461, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "logloss") < 0)) __PYX_ERR(0, 461, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_y_true = values[1]; __pyx_v_y_pred = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("logloss", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 461, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.Ops.logloss", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_3Ops_84logloss(__pyx_self, __pyx_v_self, __pyx_v_y_true, __pyx_v_y_pred); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_3Ops_84logloss(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_y_true, PyObject *__pyx_v_y_pred) { PyObject *__pyx_v_log_yp = NULL; PyObject *__pyx_v_loss = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__58) __Pyx_RefNannySetupContext("logloss", 0); __Pyx_TraceCall("logloss", __pyx_f[0], 461, 0, __PYX_ERR(0, 461, __pyx_L1_error)); /* "thinc/neural/ops.pyx":462 * * def logloss(self, y_true, y_pred): * log_yp = self.xp.log(y_pred + 1e-8) # <<<<<<<<<<<<<< * loss = (y_true * log_yp) + (1-y_true) * self.xp.log((1-y_pred)+1e-8) * return -loss */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 462, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_log); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 462, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyFloat_AddObjC(__pyx_v_y_pred, __pyx_float_1eneg_8, 1e-8, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 462, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = 
PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 462, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_log_yp = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":463 * def logloss(self, y_true, y_pred): * log_yp = self.xp.log(y_pred + 1e-8) * loss = (y_true * log_yp) + (1-y_true) * self.xp.log((1-y_pred)+1e-8) # <<<<<<<<<<<<<< * return -loss * */ __pyx_t_1 = PyNumber_Multiply(__pyx_v_y_true, __pyx_v_log_yp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_SubtractCObj(__pyx_int_1, __pyx_v_y_true, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_log); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_SubtractCObj(__pyx_int_1, __pyx_v_y_pred, 1, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyFloat_AddObjC(__pyx_t_4, __pyx_float_1eneg_8, 1e-8, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_2 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_4, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyNumber_Multiply(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_loss = __pyx_t_2; __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":464 * log_yp = self.xp.log(y_pred + 1e-8) * loss = (y_true * log_yp) + (1-y_true) * self.xp.log((1-y_pred)+1e-8) * return -loss # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyNumber_Negative(__pyx_v_loss); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":461 * gradient *= threshold / grad_norm * * def logloss(self, y_true, y_pred): # <<<<<<<<<<<<<< * log_yp = self.xp.log(y_pred + 1e-8) * loss = (y_true * log_yp) + (1-y_true) * self.xp.log((1-y_pred)+1e-8) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.Ops.logloss", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_log_yp); __Pyx_XDECREF(__pyx_v_loss); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":471 * xp = numpy * * def asarray(self, data, dtype=None): # <<<<<<<<<<<<<< * if isinstance(data, self.xp.ndarray): * if dtype is not None: */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_1asarray(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_1asarray = {"asarray", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_1asarray, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_1asarray(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_data = 0; PyObject *__pyx_v_dtype = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("asarray (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_data,&__pyx_n_s_dtype,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) 
kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("asarray", 0, 2, 3, 1); __PYX_ERR(0, 471, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "asarray") < 0)) __PYX_ERR(0, 471, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_data = values[1]; __pyx_v_dtype = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("asarray", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 471, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.asarray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_asarray(__pyx_self, __pyx_v_self, __pyx_v_data, __pyx_v_dtype); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_asarray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_data, PyObject *__pyx_v_dtype) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__59) __Pyx_RefNannySetupContext("asarray", 0); __Pyx_TraceCall("asarray", __pyx_f[0], 471, 0, __PYX_ERR(0, 471, __pyx_L1_error)); /* "thinc/neural/ops.pyx":472 * * def asarray(self, data, dtype=None): * if isinstance(data, self.xp.ndarray): # <<<<<<<<<<<<<< * if dtype is not None: * return self.xp.asarray(data, dtype=dtype) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 472, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 472, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = PyObject_IsInstance(__pyx_v_data, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 472, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = (__pyx_t_3 != 0); if (__pyx_t_4) { /* "thinc/neural/ops.pyx":473 * def asarray(self, data, dtype=None): * if isinstance(data, self.xp.ndarray): * if dtype is not None: # <<<<<<<<<<<<<< * return self.xp.asarray(data, dtype=dtype) * else: */ __pyx_t_4 = (__pyx_v_dtype != Py_None); __pyx_t_3 = (__pyx_t_4 != 0); if (__pyx_t_3) { /* "thinc/neural/ops.pyx":474 * if isinstance(data, self.xp.ndarray): * if dtype is not None: * return self.xp.asarray(data, dtype=dtype) # <<<<<<<<<<<<<< * else: * return self.xp.asarray(data) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 474, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_asarray); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 474, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 474, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_data); __Pyx_GIVEREF(__pyx_v_data); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_data); __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 474, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 474, __pyx_L1_error) __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 474, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":473 * def asarray(self, data, dtype=None): * if isinstance(data, self.xp.ndarray): * if dtype is not None: # <<<<<<<<<<<<<< * return self.xp.asarray(data, dtype=dtype) * else: */ } /* "thinc/neural/ops.pyx":476 * return self.xp.asarray(data, dtype=dtype) * else: * return self.xp.asarray(data) # <<<<<<<<<<<<<< * elif hasattr(data, 'numpy'): * # Handles PyTorch Tensor */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 476, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 476, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_6 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_data); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 476, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; } /* "thinc/neural/ops.pyx":472 * * def asarray(self, data, dtype=None): * if isinstance(data, self.xp.ndarray): # <<<<<<<<<<<<<< * if dtype is not None: * return self.xp.asarray(data, dtype=dtype) */ } /* "thinc/neural/ops.pyx":477 * else: * return self.xp.asarray(data) * elif hasattr(data, 'numpy'): # <<<<<<<<<<<<<< * # Handles PyTorch Tensor * return data.numpy() */ __pyx_t_3 = __Pyx_HasAttr(__pyx_v_data, __pyx_n_s_numpy); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 477, __pyx_L1_error) __pyx_t_4 = (__pyx_t_3 != 0); if (__pyx_t_4) { /* "thinc/neural/ops.pyx":479 * elif hasattr(data, 'numpy'): * # Handles PyTorch Tensor * return data.numpy() # <<<<<<<<<<<<<< * elif hasattr(data, "get"): * return data.get() */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_6 = (__pyx_t_5) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":477 * else: * return self.xp.asarray(data) * elif hasattr(data, 'numpy'): # <<<<<<<<<<<<<< * # Handles PyTorch Tensor * return data.numpy() */ } /* "thinc/neural/ops.pyx":480 * # Handles PyTorch Tensor * return data.numpy() * elif hasattr(data, "get"): # <<<<<<<<<<<<<< * return data.get() * elif dtype is not None: */ __pyx_t_4 = __Pyx_HasAttr(__pyx_v_data, __pyx_n_s_get); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 480, __pyx_L1_error) __pyx_t_3 = (__pyx_t_4 != 0); if (__pyx_t_3) { /* "thinc/neural/ops.pyx":481 * return data.numpy() * elif hasattr(data, "get"): * return data.get() # <<<<<<<<<<<<<< * elif dtype is not None: * return self.xp.array(data, dtype=dtype) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_data, __pyx_n_s_get); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 481, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_6 = (__pyx_t_5) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 481, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":480 * # Handles PyTorch Tensor * return data.numpy() * elif hasattr(data, "get"): # <<<<<<<<<<<<<< * return data.get() * elif dtype is not None: */ } /* "thinc/neural/ops.pyx":482 * elif hasattr(data, "get"): * return data.get() * elif dtype is not None: # <<<<<<<<<<<<<< * return self.xp.array(data, dtype=dtype) * else: */ __pyx_t_3 = (__pyx_v_dtype != Py_None); __pyx_t_4 = (__pyx_t_3 != 0); if (__pyx_t_4) { /* "thinc/neural/ops.pyx":483 * return data.get() * elif dtype is not None: * return self.xp.array(data, dtype=dtype) # <<<<<<<<<<<<<< * else: * return self.xp.array(data) */ __Pyx_XDECREF(__pyx_r); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_v_data); __Pyx_GIVEREF(__pyx_v_data); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_data); __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 483, __pyx_L1_error) __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":482 * elif hasattr(data, "get"): * return data.get() * elif dtype is not None: # <<<<<<<<<<<<<< * return self.xp.array(data, dtype=dtype) * else: */ } /* "thinc/neural/ops.pyx":485 * return self.xp.array(data, dtype=dtype) * else: * return self.xp.array(data) # <<<<<<<<<<<<<< * * */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_array); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_1 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_5, __pyx_v_data) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_data); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; } /* "thinc/neural/ops.pyx":471 * xp = numpy * * def asarray(self, data, dtype=None): # <<<<<<<<<<<<<< * if isinstance(data, self.xp.ndarray): * if dtype is not None: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.asarray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":488 * * * def allocate(self, shape, dtype='float32'): # <<<<<<<<<<<<<< * if isinstance(shape, integer_types): * shape = (shape,) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_3allocate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_3allocate = {"allocate", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_3allocate, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_3allocate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_shape = 0; PyObject *__pyx_v_dtype = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("allocate (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_shape,&__pyx_n_s_dtype,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject*)__pyx_n_s_float32)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("allocate", 0, 2, 3, 1); __PYX_ERR(0, 488, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "allocate") < 0)) __PYX_ERR(0, 488, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_shape = values[1]; __pyx_v_dtype = values[2]; } goto __pyx_L4_argument_unpacking_done; 
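/* The NumpyOps.asarray logic compiled above is quoted piecewise in the
 * "thinc/neural/ops.pyx" source comments; consolidated here as a readability
 * sketch reconstructed from those fragments (the dispatch order is: numpy
 * ndarray, then objects exposing .numpy() such as PyTorch tensors, then
 * objects exposing .get(), then a plain xp.array conversion):
 *
 *     def asarray(self, data, dtype=None):
 *         if isinstance(data, self.xp.ndarray):
 *             if dtype is not None:
 *                 return self.xp.asarray(data, dtype=dtype)
 *             else:
 *                 return self.xp.asarray(data)
 *         elif hasattr(data, 'numpy'):
 *             # Handles PyTorch Tensor
 *             return data.numpy()
 *         elif hasattr(data, "get"):
 *             return data.get()
 *         elif dtype is not None:
 *             return self.xp.array(data, dtype=dtype)
 *         else:
 *             return self.xp.array(data)
 *
 * Hypothetical usage, with names assumed purely for illustration:
 *
 *     ops = NumpyOps()
 *     arr = ops.asarray([1.0, 2.0, 3.0], dtype="float32")
 */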
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("allocate", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 488, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.allocate", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_2allocate(__pyx_self, __pyx_v_self, __pyx_v_shape, __pyx_v_dtype); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_2allocate(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_shape, PyObject *__pyx_v_dtype) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__60) __Pyx_RefNannySetupContext("allocate", 0); __Pyx_TraceCall("allocate", __pyx_f[0], 488, 0, __PYX_ERR(0, 488, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_shape); /* "thinc/neural/ops.pyx":489 * * def allocate(self, shape, dtype='float32'): * if isinstance(shape, integer_types): # <<<<<<<<<<<<<< * shape = (shape,) * return self.xp.zeros(shape, dtype=dtype) */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_integer_types); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 489, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_IsInstance(__pyx_v_shape, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 489, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "thinc/neural/ops.pyx":490 * def allocate(self, shape, dtype='float32'): * if isinstance(shape, integer_types): * shape = (shape,) # <<<<<<<<<<<<<< * return self.xp.zeros(shape, dtype=dtype) * */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 490, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_shape); __Pyx_DECREF_SET(__pyx_v_shape, __pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":489 * * def allocate(self, shape, dtype='float32'): * if isinstance(shape, integer_types): # <<<<<<<<<<<<<< * shape = (shape,) * return self.xp.zeros(shape, dtype=dtype) */ } /* "thinc/neural/ops.pyx":491 * if isinstance(shape, integer_types): * shape = (shape,) * return self.xp.zeros(shape, dtype=dtype) # <<<<<<<<<<<<<< * * def inplace_add(self, np.ndarray x, np.ndarray y, float scale=1.0): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_shape); __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 491, __pyx_L1_error) __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_1, __pyx_t_5); if 
(unlikely(!__pyx_t_6)) __PYX_ERR(0, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":488 * * * def allocate(self, shape, dtype='float32'): # <<<<<<<<<<<<<< * if isinstance(shape, integer_types): * shape = (shape,) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.allocate", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_shape); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":493 * return self.xp.zeros(shape, dtype=dtype) * * def inplace_add(self, np.ndarray x, np.ndarray y, float scale=1.0): # <<<<<<<<<<<<<< * VecVec.add_i(x.data, * y.data, scale, x.shape[0]) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_5inplace_add(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_5inplace_add = {"inplace_add", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_5inplace_add, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_5inplace_add(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; float __pyx_v_scale; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("inplace_add (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_scale,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("inplace_add", 0, 3, 4, 1); __PYX_ERR(0, 493, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("inplace_add", 0, 3, 4, 2); __PYX_ERR(0, 493, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_scale); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "inplace_add") < 0)) __PYX_ERR(0, 493, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = 
PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_x = ((PyArrayObject *)values[1]); __pyx_v_y = ((PyArrayObject *)values[2]); if (values[3]) { __pyx_v_scale = __pyx_PyFloat_AsFloat(values[3]); if (unlikely((__pyx_v_scale == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 493, __pyx_L3_error) } else { __pyx_v_scale = ((float)((double)1.0)); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("inplace_add", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 493, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.inplace_add", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) __PYX_ERR(0, 493, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 1, "y", 0))) __PYX_ERR(0, 493, __pyx_L1_error) __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_4inplace_add(__pyx_self, __pyx_v_self, __pyx_v_x, __pyx_v_y, __pyx_v_scale); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_4inplace_add(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, float __pyx_v_scale) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__61) __Pyx_RefNannySetupContext("inplace_add", 0); __Pyx_TraceCall("inplace_add", __pyx_f[0], 493, 0, __PYX_ERR(0, 493, __pyx_L1_error)); /* "thinc/neural/ops.pyx":494 * * def inplace_add(self, np.ndarray x, np.ndarray y, float scale=1.0): * VecVec.add_i(x.data, # <<<<<<<<<<<<<< * y.data, scale, x.shape[0]) * */ __pyx_f_5thinc_6linalg_6VecVec_add_i(((float *)__pyx_v_x->data), ((float *)__pyx_v_y->data), __pyx_v_scale, (__pyx_v_x->dimensions[0])); /* "thinc/neural/ops.pyx":493 * return self.xp.zeros(shape, dtype=dtype) * * def inplace_add(self, np.ndarray x, np.ndarray y, float scale=1.0): # <<<<<<<<<<<<<< * VecVec.add_i(x.data, * y.data, scale, x.shape[0]) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.inplace_add", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":497 * y.data, scale, x.shape[0]) * * def matmul(self, float[:, :, ::1] x, float[:, :, ::1] y, out=None): # <<<<<<<<<<<<<< * assert x.shape[0] == y.shape[0] * assert x.shape[2] == y.shape[1] */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_7matmul(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_7matmul = {"matmul", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_7matmul, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_7matmul(PyObject 
*__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_x = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_y = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_v_out = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("matmul (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_out,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("matmul", 0, 3, 4, 1); __PYX_ERR(0, 497, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("matmul", 0, 3, 4, 2); __PYX_ERR(0, 497, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "matmul") < 0)) __PYX_ERR(0, 497, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_x = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_x.memview)) __PYX_ERR(0, 497, __pyx_L3_error) __pyx_v_y = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_y.memview)) __PYX_ERR(0, 497, __pyx_L3_error) __pyx_v_out = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("matmul", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 497, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.matmul", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_6matmul(__pyx_self, __pyx_v_self, __pyx_v_x, __pyx_v_y, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_6matmul(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_x, __Pyx_memviewslice __pyx_v_y, PyObject *__pyx_v_out) { PyArrayObject *__pyx_v_out_array = 0; Py_ssize_t __pyx_v_i; 
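/* The NumpyOps.matmul body compiled in this function performs a batched
 * matrix multiply: for C-contiguous float32 inputs x of shape (B, M, K) and
 * y of shape (B, K, N) it fills an output of shape (B, M, N), calling
 * blis.py.gemm once per batch item. A readability sketch, reconstructed from
 * the .pyx fragments quoted in the comments below:
 *
 *     def matmul(self, float[:, :, ::1] x, float[:, :, ::1] y, out=None):
 *         assert x.shape[0] == y.shape[0]
 *         assert x.shape[2] == y.shape[1]
 *         cdef np.ndarray out_array
 *         if out is None:
 *             out_array = self.allocate((x.shape[0], x.shape[1], y.shape[2]))
 *         else:
 *             out_array = self.xp.asarray(out)
 *         assert out_array.shape[0] == x.shape[0]
 *         assert out_array.shape[1] == x.shape[1]
 *         assert out_array.shape[2] == y.shape[2]
 *         for i in range(x.shape[0]):
 *             blis.py.gemm(x[i], y[i], out=out_array[i])
 *         return out_array
 */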
PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; __Pyx_memviewslice __pyx_t_12 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__62) __Pyx_RefNannySetupContext("matmul", 0); __Pyx_TraceCall("matmul", __pyx_f[0], 497, 0, __PYX_ERR(0, 497, __pyx_L1_error)); /* "thinc/neural/ops.pyx":498 * * def matmul(self, float[:, :, ::1] x, float[:, :, ::1] y, out=None): * assert x.shape[0] == y.shape[0] # <<<<<<<<<<<<<< * assert x.shape[2] == y.shape[1] * cdef np.ndarray out_array */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((__pyx_v_x.shape[0]) == (__pyx_v_y.shape[0])) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 498, __pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":499 * def matmul(self, float[:, :, ::1] x, float[:, :, ::1] y, out=None): * assert x.shape[0] == y.shape[0] * assert x.shape[2] == y.shape[1] # <<<<<<<<<<<<<< * cdef np.ndarray out_array * if out is None: */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((__pyx_v_x.shape[2]) == (__pyx_v_y.shape[1])) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 499, __pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":501 * assert x.shape[2] == y.shape[1] * cdef np.ndarray out_array * if out is None: # <<<<<<<<<<<<<< * out_array = self.allocate((x.shape[0], x.shape[1], y.shape[2])) * else: */ __pyx_t_1 = (__pyx_v_out == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "thinc/neural/ops.pyx":502 * cdef np.ndarray out_array * if out is None: * out_array = self.allocate((x.shape[0], x.shape[1], y.shape[2])) # <<<<<<<<<<<<<< * else: * out_array = self.xp.asarray(out) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 502, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyInt_FromSsize_t((__pyx_v_x.shape[0])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 502, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t((__pyx_v_x.shape[1])); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 502, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyInt_FromSsize_t((__pyx_v_y.shape[2])); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 502, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 502, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_3 = (__pyx_t_7) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_7, __pyx_t_8) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_8); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 502, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 502, __pyx_L1_error) __pyx_v_out_array = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":501 * assert x.shape[2] == y.shape[1] * cdef np.ndarray out_array * if out is None: # <<<<<<<<<<<<<< * out_array = self.allocate((x.shape[0], x.shape[1], y.shape[2])) * else: */ goto __pyx_L3; } /* "thinc/neural/ops.pyx":504 * out_array = self.allocate((x.shape[0], x.shape[1], y.shape[2])) * else: * out_array = self.xp.asarray(out) # <<<<<<<<<<<<<< * assert out_array.shape[0] == x.shape[0] * assert out_array.shape[1] == x.shape[1] */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_asarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_8); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_8, function); } } __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_4, __pyx_v_out) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_v_out); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 504, __pyx_L1_error) __pyx_v_out_array = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "thinc/neural/ops.pyx":505 * else: * out_array = self.xp.asarray(out) * assert out_array.shape[0] == x.shape[0] # <<<<<<<<<<<<<< * assert out_array.shape[1] == x.shape[1] * assert out_array.shape[2] == y.shape[2] */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((__pyx_v_out_array->dimensions[0]) == (__pyx_v_x.shape[0])) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 505, __pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":506 * out_array = self.xp.asarray(out) * assert out_array.shape[0] == x.shape[0] * assert out_array.shape[1] == x.shape[1] # <<<<<<<<<<<<<< * assert out_array.shape[2] == y.shape[2] * for i in range(x.shape[0]): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((__pyx_v_out_array->dimensions[1]) == (__pyx_v_x.shape[1])) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 506, __pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":507 * assert out_array.shape[0] == x.shape[0] * assert out_array.shape[1] == x.shape[1] * assert out_array.shape[2] == y.shape[2] # <<<<<<<<<<<<<< * for i in range(x.shape[0]): * blis.py.gemm(x[i], y[i], out=out_array[i]) */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((__pyx_v_out_array->dimensions[2]) == (__pyx_v_y.shape[2])) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 507, 
__pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":508 * assert out_array.shape[1] == x.shape[1] * assert out_array.shape[2] == y.shape[2] * for i in range(x.shape[0]): # <<<<<<<<<<<<<< * blis.py.gemm(x[i], y[i], out=out_array[i]) * return out_array */ __pyx_t_9 = (__pyx_v_x.shape[0]); __pyx_t_10 = __pyx_t_9; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "thinc/neural/ops.pyx":509 * assert out_array.shape[2] == y.shape[2] * for i in range(x.shape[0]): * blis.py.gemm(x[i], y[i], out=out_array[i]) # <<<<<<<<<<<<<< * return out_array * */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_blis); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 509, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_py); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 509, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_gemm); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 509, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_12.data = __pyx_v_x.data; __pyx_t_12.memview = __pyx_v_x.memview; __PYX_INC_MEMVIEW(&__pyx_t_12, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_shape = __pyx_v_x.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_x.strides[0]; if (__pyx_tmp_idx < 0) __pyx_tmp_idx += __pyx_tmp_shape; if (unlikely(!__Pyx_is_valid_index(__pyx_tmp_idx, __pyx_tmp_shape))) { PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); __PYX_ERR(0, 509, __pyx_L1_error) } __pyx_t_12.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_12.shape[0] = __pyx_v_x.shape[1]; __pyx_t_12.strides[0] = __pyx_v_x.strides[1]; __pyx_t_12.suboffsets[0] = -1; __pyx_t_12.shape[1] = __pyx_v_x.shape[2]; __pyx_t_12.strides[1] = __pyx_v_x.strides[2]; __pyx_t_12.suboffsets[1] = -1; __pyx_t_8 = __pyx_memoryview_fromslice(__pyx_t_12, 2, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 509, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __PYX_XDEC_MEMVIEW(&__pyx_t_12, 1); __pyx_t_12.memview = NULL; __pyx_t_12.data = NULL; __pyx_t_12.data = __pyx_v_y.data; __pyx_t_12.memview = __pyx_v_y.memview; __PYX_INC_MEMVIEW(&__pyx_t_12, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_shape = __pyx_v_y.shape[0]; Py_ssize_t __pyx_tmp_stride = __pyx_v_y.strides[0]; if (__pyx_tmp_idx < 0) __pyx_tmp_idx += __pyx_tmp_shape; if (unlikely(!__Pyx_is_valid_index(__pyx_tmp_idx, __pyx_tmp_shape))) { PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 0)"); __PYX_ERR(0, 509, __pyx_L1_error) } __pyx_t_12.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_12.shape[0] = __pyx_v_y.shape[1]; __pyx_t_12.strides[0] = __pyx_v_y.strides[1]; __pyx_t_12.suboffsets[0] = -1; __pyx_t_12.shape[1] = __pyx_v_y.shape[2]; __pyx_t_12.strides[1] = __pyx_v_y.strides[2]; __pyx_t_12.suboffsets[1] = -1; __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_t_12, 2, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 509, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __PYX_XDEC_MEMVIEW(&__pyx_t_12, 1); __pyx_t_12.memview = NULL; __pyx_t_12.data = NULL; __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 509, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_4); 
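/* __pyx_t_8 and __pyx_t_4 hold the x[i] and y[i] memoryview slices built above; they are
   packed into the positional-argument tuple for the blis.py.gemm(x[i], y[i], out=out_array[i])
   call from ops.pyx:509, and out_array[i] is added below as the single 'out' keyword argument. */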
PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4); __pyx_t_8 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 509, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = __Pyx_GetItemInt(((PyObject *)__pyx_v_out_array), __pyx_v_i, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 1, 1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 509, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_out, __pyx_t_8) < 0) __PYX_ERR(0, 509, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, __pyx_t_4); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 509, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } /* "thinc/neural/ops.pyx":510 * for i in range(x.shape[0]): * blis.py.gemm(x[i], y[i], out=out_array[i]) * return out_array # <<<<<<<<<<<<<< * * def gemm(self, const float[:, ::1] x, const float[:, ::1] y, trans1=False, trans2=False, */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_out_array)); __pyx_r = ((PyObject *)__pyx_v_out_array); goto __pyx_L0; /* "thinc/neural/ops.pyx":497 * y.data, scale, x.shape[0]) * * def matmul(self, float[:, :, ::1] x, float[:, :, ::1] y, out=None): # <<<<<<<<<<<<<< * assert x.shape[0] == y.shape[0] * assert x.shape[2] == y.shape[1] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __PYX_XDEC_MEMVIEW(&__pyx_t_12, 1); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.matmul", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_out_array); __PYX_XDEC_MEMVIEW(&__pyx_v_x, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_y, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":512 * return out_array * * def gemm(self, const float[:, ::1] x, const float[:, ::1] y, trans1=False, trans2=False, # <<<<<<<<<<<<<< * out=None): * cdef int m */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_9gemm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_9gemm = {"gemm", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_9gemm, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_9gemm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_x = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_y = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_v_trans1 = 0; PyObject *__pyx_v_trans2 = 0; PyObject *__pyx_v_out = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gemm (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_trans1,&__pyx_n_s_trans2,&__pyx_n_s_out,0}; PyObject* values[6] = {0,0,0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_False)); values[4] = ((PyObject *)((PyObject *)Py_False)); /* "thinc/neural/ops.pyx":513 * * def gemm(self, const float[:, ::1] x, const float[:, ::1] y, trans1=False, trans2=False, * out=None): # <<<<<<<<<<<<<< * cdef int m 
* if trans1: */ values[5] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gemm", 0, 3, 6, 1); __PYX_ERR(0, 512, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gemm", 0, 3, 6, 2); __PYX_ERR(0, 512, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_trans1); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_trans2); if (value) { values[4] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 5: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[5] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gemm") < 0)) __PYX_ERR(0, 512, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_x = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_x.memview)) __PYX_ERR(0, 512, __pyx_L3_error) __pyx_v_y = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[2], 0); if (unlikely(!__pyx_v_y.memview)) __PYX_ERR(0, 512, __pyx_L3_error) __pyx_v_trans1 = values[3]; __pyx_v_trans2 = values[4]; __pyx_v_out = values[5]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gemm", 0, 3, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 512, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.gemm", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_8gemm(__pyx_self, __pyx_v_self, __pyx_v_x, __pyx_v_y, __pyx_v_trans1, __pyx_v_trans2, __pyx_v_out); /* "thinc/neural/ops.pyx":512 * return out_array * * def gemm(self, const float[:, ::1] x, const float[:, ::1] y, trans1=False, trans2=False, # <<<<<<<<<<<<<< * out=None): * cdef int m */ /* 
function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_8gemm(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_x, __Pyx_memviewslice __pyx_v_y, PyObject *__pyx_v_trans1, PyObject *__pyx_v_trans2, PyObject *__pyx_v_out) { int __pyx_v_m; int __pyx_v_n; PyArrayObject *__pyx_v_out_array = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__63) __Pyx_RefNannySetupContext("gemm", 0); __Pyx_TraceCall("gemm", __pyx_f[0], 512, 0, __PYX_ERR(0, 512, __pyx_L1_error)); /* "thinc/neural/ops.pyx":515 * out=None): * cdef int m * if trans1: # <<<<<<<<<<<<<< * m = x.shape[1] * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_trans1); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 515, __pyx_L1_error) if (__pyx_t_1) { /* "thinc/neural/ops.pyx":516 * cdef int m * if trans1: * m = x.shape[1] # <<<<<<<<<<<<<< * else: * m = x.shape[0] */ __pyx_v_m = (__pyx_v_x.shape[1]); /* "thinc/neural/ops.pyx":515 * out=None): * cdef int m * if trans1: # <<<<<<<<<<<<<< * m = x.shape[1] * else: */ goto __pyx_L3; } /* "thinc/neural/ops.pyx":518 * m = x.shape[1] * else: * m = x.shape[0] # <<<<<<<<<<<<<< * cdef int n * if trans2: */ /*else*/ { __pyx_v_m = (__pyx_v_x.shape[0]); } __pyx_L3:; /* "thinc/neural/ops.pyx":520 * m = x.shape[0] * cdef int n * if trans2: # <<<<<<<<<<<<<< * n = y.shape[0] * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_trans2); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 520, __pyx_L1_error) if (__pyx_t_1) { /* "thinc/neural/ops.pyx":521 * cdef int n * if trans2: * n = y.shape[0] # <<<<<<<<<<<<<< * else: * n = y.shape[1] */ __pyx_v_n = (__pyx_v_y.shape[0]); /* "thinc/neural/ops.pyx":520 * m = x.shape[0] * cdef int n * if trans2: # <<<<<<<<<<<<<< * n = y.shape[0] * else: */ goto __pyx_L4; } /* "thinc/neural/ops.pyx":523 * n = y.shape[0] * else: * n = y.shape[1] # <<<<<<<<<<<<<< * cdef np.ndarray out_array * if out is None: */ /*else*/ { __pyx_v_n = (__pyx_v_y.shape[1]); } __pyx_L4:; /* "thinc/neural/ops.pyx":525 * n = y.shape[1] * cdef np.ndarray out_array * if out is None: # <<<<<<<<<<<<<< * out_array = self.allocate((m, n)) * else: */ __pyx_t_1 = (__pyx_v_out == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "thinc/neural/ops.pyx":526 * cdef np.ndarray out_array * if out is None: * out_array = self.allocate((m, n)) # <<<<<<<<<<<<<< * else: * out_array = self.xp.asarray(out) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_m); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_n); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_6 = 
PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_6, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_7); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 526, __pyx_L1_error) __pyx_v_out_array = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":525 * n = y.shape[1] * cdef np.ndarray out_array * if out is None: # <<<<<<<<<<<<<< * out_array = self.allocate((m, n)) * else: */ goto __pyx_L5; } /* "thinc/neural/ops.pyx":528 * out_array = self.allocate((m, n)) * else: * out_array = self.xp.asarray(out) # <<<<<<<<<<<<<< * assert out_array.shape[0] == m * assert out_array.shape[1] == n */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 528, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_asarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 528, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_4, __pyx_v_out) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_v_out); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 528, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 528, __pyx_L1_error) __pyx_v_out_array = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; } __pyx_L5:; /* "thinc/neural/ops.pyx":529 * else: * out_array = self.xp.asarray(out) * assert out_array.shape[0] == m # <<<<<<<<<<<<<< * assert out_array.shape[1] == n * blis.py.gemm(x, y, out=out_array, trans1=trans1, trans2=trans2) */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((__pyx_v_out_array->dimensions[0]) == __pyx_v_m) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 529, __pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":530 * out_array = self.xp.asarray(out) * assert out_array.shape[0] == m * assert out_array.shape[1] == n # <<<<<<<<<<<<<< * blis.py.gemm(x, y, out=out_array, trans1=trans1, trans2=trans2) * return out_array */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((__pyx_v_out_array->dimensions[1]) == __pyx_v_n) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 530, __pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":531 * assert out_array.shape[0] == m * assert out_array.shape[1] == n * blis.py.gemm(x, y, out=out_array, trans1=trans1, trans2=trans2) # <<<<<<<<<<<<<< * return out_array * */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_blis); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 531, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_py); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 531, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_gemm); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 531, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __pyx_memoryview_fromslice(__pyx_v_x, 2, (PyObject *(*)(char *)) __pyx_memview_get_float__const__, (int (*)(char *, PyObject *)) NULL, 0);; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 531, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_v_y, 2, (PyObject *(*)(char *)) __pyx_memview_get_float__const__, (int (*)(char *, PyObject *)) NULL, 0);; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 531, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 531, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_4); __pyx_t_7 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 531, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_out, ((PyObject *)__pyx_v_out_array)) < 0) __PYX_ERR(0, 531, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_trans1, __pyx_v_trans1) < 0) __PYX_ERR(0, 531, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_trans2, __pyx_v_trans2) < 0) __PYX_ERR(0, 531, __pyx_L1_error) __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, __pyx_t_4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 531, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_6); 
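/* Temporaries from the blis.py.gemm(x, y, out=out_array, trans1=trans1, trans2=trans2) call at
   ops.pyx:531 are released here; the call's return value (__pyx_t_7) is dropped just below,
   because the Python-level gemm returns out_array, which blis fills in place. At the Python
   level this amounts to a NumPy-style x @ y with optional transposes written into out_array,
   assuming blis.py.gemm follows ordinary matrix-multiply semantics. */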
__pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":532 * assert out_array.shape[1] == n * blis.py.gemm(x, y, out=out_array, trans1=trans1, trans2=trans2) * return out_array # <<<<<<<<<<<<<< * * def affine(self, weights, bias, signal): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_out_array)); __pyx_r = ((PyObject *)__pyx_v_out_array); goto __pyx_L0; /* "thinc/neural/ops.pyx":512 * return out_array * * def gemm(self, const float[:, ::1] x, const float[:, ::1] y, trans1=False, trans2=False, # <<<<<<<<<<<<<< * out=None): * cdef int m */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.gemm", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_out_array); __PYX_XDEC_MEMVIEW(&__pyx_v_x, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_y, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":534 * return out_array * * def affine(self, weights, bias, signal): # <<<<<<<<<<<<<< * dotted = self.gemm(signal, weights, trans2=True) * dotted += bias */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_11affine(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_11affine = {"affine", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_11affine, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_11affine(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_weights = 0; PyObject *__pyx_v_bias = 0; PyObject *__pyx_v_signal = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("affine (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_weights,&__pyx_n_s_bias,&__pyx_n_s_signal,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("affine", 1, 4, 4, 1); __PYX_ERR(0, 534, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bias)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("affine", 1, 4, 4, 2); __PYX_ERR(0, 534, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_signal)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("affine", 1, 4, 
4, 3); __PYX_ERR(0, 534, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "affine") < 0)) __PYX_ERR(0, 534, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_self = values[0]; __pyx_v_weights = values[1]; __pyx_v_bias = values[2]; __pyx_v_signal = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("affine", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 534, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.affine", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_10affine(__pyx_self, __pyx_v_self, __pyx_v_weights, __pyx_v_bias, __pyx_v_signal); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_10affine(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_weights, PyObject *__pyx_v_bias, PyObject *__pyx_v_signal) { PyObject *__pyx_v_dotted = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__64) __Pyx_RefNannySetupContext("affine", 0); __Pyx_TraceCall("affine", __pyx_f[0], 534, 0, __PYX_ERR(0, 534, __pyx_L1_error)); /* "thinc/neural/ops.pyx":535 * * def affine(self, weights, bias, signal): * dotted = self.gemm(signal, weights, trans2=True) # <<<<<<<<<<<<<< * dotted += bias * return dotted */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_gemm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 535, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 535, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_signal); __Pyx_GIVEREF(__pyx_v_signal); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_signal); __Pyx_INCREF(__pyx_v_weights); __Pyx_GIVEREF(__pyx_v_weights); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_weights); __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 535, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_trans2, Py_True) < 0) __PYX_ERR(0, 535, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 535, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_dotted = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":536 * def affine(self, weights, bias, signal): * dotted = self.gemm(signal, weights, trans2=True) * dotted += bias # <<<<<<<<<<<<<< * return dotted * */ __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_dotted, __pyx_v_bias); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 536, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF_SET(__pyx_v_dotted, __pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":537 * dotted = self.gemm(signal, weights, trans2=True) * dotted += bias * return dotted # 
<<<<<<<<<<<<<< * * def elu(self, ndarray X, inplace=True): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_dotted); __pyx_r = __pyx_v_dotted; goto __pyx_L0; /* "thinc/neural/ops.pyx":534 * return out_array * * def affine(self, weights, bias, signal): # <<<<<<<<<<<<<< * dotted = self.gemm(signal, weights, trans2=True) * dotted += bias */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.affine", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dotted); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":539 * return dotted * * def elu(self, ndarray X, inplace=True): # <<<<<<<<<<<<<< * cdef weight_t* data = X.data * cdef size_t size = X.size */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_13elu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_13elu = {"elu", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_13elu, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_13elu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyArrayObject *__pyx_v_X = 0; CYTHON_UNUSED PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("elu (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_inplace,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_True)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("elu", 0, 2, 3, 1); __PYX_ERR(0, 539, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "elu") < 0)) __PYX_ERR(0, 539, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = ((PyArrayObject *)values[1]); __pyx_v_inplace = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("elu", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 539, 
__pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.elu", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) __PYX_ERR(0, 539, __pyx_L1_error) __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_12elu(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_inplace); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_12elu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_X, CYTHON_UNUSED PyObject *__pyx_v_inplace) { __pyx_t_5thinc_8typedefs_weight_t *__pyx_v_data; size_t __pyx_v_size; size_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; size_t __pyx_t_2; size_t __pyx_t_3; size_t __pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__65) __Pyx_RefNannySetupContext("elu", 0); __Pyx_TraceCall("elu", __pyx_f[0], 539, 0, __PYX_ERR(0, 539, __pyx_L1_error)); /* "thinc/neural/ops.pyx":540 * * def elu(self, ndarray X, inplace=True): * cdef weight_t* data = X.data # <<<<<<<<<<<<<< * cdef size_t size = X.size * for i in range(size): */ __pyx_v_data = ((__pyx_t_5thinc_8typedefs_weight_t *)__pyx_v_X->data); /* "thinc/neural/ops.pyx":541 * def elu(self, ndarray X, inplace=True): * cdef weight_t* data = X.data * cdef size_t size = X.size # <<<<<<<<<<<<<< * for i in range(size): * if data[i] < 0: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_X), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 541, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_size_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 541, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_size = __pyx_t_2; /* "thinc/neural/ops.pyx":542 * cdef weight_t* data = X.data * cdef size_t size = X.size * for i in range(size): # <<<<<<<<<<<<<< * if data[i] < 0: * data[i] = expf(data[i])-1. */ __pyx_t_2 = __pyx_v_size; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "thinc/neural/ops.pyx":543 * cdef size_t size = X.size * for i in range(size): * if data[i] < 0: # <<<<<<<<<<<<<< * data[i] = expf(data[i])-1. * */ __pyx_t_5 = (((__pyx_v_data[__pyx_v_i]) < 0.0) != 0); if (__pyx_t_5) { /* "thinc/neural/ops.pyx":544 * for i in range(size): * if data[i] < 0: * data[i] = expf(data[i])-1. # <<<<<<<<<<<<<< * * def selu(self, ndarray X, inplace=True): */ (__pyx_v_data[__pyx_v_i]) = (expf((__pyx_v_data[__pyx_v_i])) - 1.); /* "thinc/neural/ops.pyx":543 * cdef size_t size = X.size * for i in range(size): * if data[i] < 0: # <<<<<<<<<<<<<< * data[i] = expf(data[i])-1. 
* */ } } /* "thinc/neural/ops.pyx":539 * return dotted * * def elu(self, ndarray X, inplace=True): # <<<<<<<<<<<<<< * cdef weight_t* data = X.data * cdef size_t size = X.size */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.elu", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":546 * data[i] = expf(data[i])-1. * * def selu(self, ndarray X, inplace=True): # <<<<<<<<<<<<<< * cdef weight_t* data = X.data * cdef size_t size = X.size */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_15selu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_15selu = {"selu", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_15selu, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_15selu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyArrayObject *__pyx_v_X = 0; CYTHON_UNUSED PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("selu (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_inplace,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_True)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("selu", 0, 2, 3, 1); __PYX_ERR(0, 546, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "selu") < 0)) __PYX_ERR(0, 546, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = ((PyArrayObject *)values[1]); __pyx_v_inplace = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("selu", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 546, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.selu", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; 
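/* Argument unpacking for the NumpyOps.selu wrapper (ops.pyx:546) completes at the label below.
   The implementation function that follows mutates X.data in place: negative entries become
   scale * alpha * (expf(x) - 1.) and non-negative entries become scale * x, with
   scale = 1.0507009873554805 and alpha = 1.6732632423543772 as declared in the .pyx source. */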
__pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) __PYX_ERR(0, 546, __pyx_L1_error) __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_14selu(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_inplace); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_14selu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_X, CYTHON_UNUSED PyObject *__pyx_v_inplace) { __pyx_t_5thinc_8typedefs_weight_t *__pyx_v_data; size_t __pyx_v_size; float __pyx_v_scale; float __pyx_v_alpha; size_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; size_t __pyx_t_2; size_t __pyx_t_3; size_t __pyx_t_4; int __pyx_t_5; size_t __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__66) __Pyx_RefNannySetupContext("selu", 0); __Pyx_TraceCall("selu", __pyx_f[0], 546, 0, __PYX_ERR(0, 546, __pyx_L1_error)); /* "thinc/neural/ops.pyx":547 * * def selu(self, ndarray X, inplace=True): * cdef weight_t* data = X.data # <<<<<<<<<<<<<< * cdef size_t size = X.size * cdef float scale = 1.0507009873554805 */ __pyx_v_data = ((__pyx_t_5thinc_8typedefs_weight_t *)__pyx_v_X->data); /* "thinc/neural/ops.pyx":548 * def selu(self, ndarray X, inplace=True): * cdef weight_t* data = X.data * cdef size_t size = X.size # <<<<<<<<<<<<<< * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_X), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 548, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_size_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 548, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_size = __pyx_t_2; /* "thinc/neural/ops.pyx":549 * cdef weight_t* data = X.data * cdef size_t size = X.size * cdef float scale = 1.0507009873554805 # <<<<<<<<<<<<<< * cdef float alpha = 1.6732632423543772 * for i in range(size): */ __pyx_v_scale = 1.0507009873554805; /* "thinc/neural/ops.pyx":550 * cdef size_t size = X.size * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 # <<<<<<<<<<<<<< * for i in range(size): * if data[i] < 0: */ __pyx_v_alpha = 1.6732632423543772; /* "thinc/neural/ops.pyx":551 * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 * for i in range(size): # <<<<<<<<<<<<<< * if data[i] < 0: * data[i] = alpha * (expf(data[i])-1.) */ __pyx_t_2 = __pyx_v_size; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "thinc/neural/ops.pyx":552 * cdef float alpha = 1.6732632423543772 * for i in range(size): * if data[i] < 0: # <<<<<<<<<<<<<< * data[i] = alpha * (expf(data[i])-1.) * data[i] *= scale */ __pyx_t_5 = (((__pyx_v_data[__pyx_v_i]) < 0.0) != 0); if (__pyx_t_5) { /* "thinc/neural/ops.pyx":553 * for i in range(size): * if data[i] < 0: * data[i] = alpha * (expf(data[i])-1.) # <<<<<<<<<<<<<< * data[i] *= scale * */ (__pyx_v_data[__pyx_v_i]) = (__pyx_v_alpha * (expf((__pyx_v_data[__pyx_v_i])) - 1.)); /* "thinc/neural/ops.pyx":552 * cdef float alpha = 1.6732632423543772 * for i in range(size): * if data[i] < 0: # <<<<<<<<<<<<<< * data[i] = alpha * (expf(data[i])-1.) 
* data[i] *= scale */ } /* "thinc/neural/ops.pyx":554 * if data[i] < 0: * data[i] = alpha * (expf(data[i])-1.) * data[i] *= scale # <<<<<<<<<<<<<< * * def backprop_selu(self, ndarray delta_, ndarray signal_in_, */ __pyx_t_6 = __pyx_v_i; (__pyx_v_data[__pyx_t_6]) = ((__pyx_v_data[__pyx_t_6]) * __pyx_v_scale); } /* "thinc/neural/ops.pyx":546 * data[i] = expf(data[i])-1. * * def selu(self, ndarray X, inplace=True): # <<<<<<<<<<<<<< * cdef weight_t* data = X.data * cdef size_t size = X.size */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.selu", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":556 * data[i] *= scale * * def backprop_selu(self, ndarray delta_, ndarray signal_in_, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the SELU transformation */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_17backprop_selu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_17backprop_selu = {"backprop_selu", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_17backprop_selu, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_17backprop_selu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyArrayObject *__pyx_v_delta_ = 0; PyArrayObject *__pyx_v_signal_in_ = 0; CYTHON_UNUSED PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_selu (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_delta,&__pyx_n_s_signal_in,&__pyx_n_s_inplace,0}; PyObject* values[4] = {0,0,0,0}; /* "thinc/neural/ops.pyx":557 * * def backprop_selu(self, ndarray delta_, ndarray signal_in_, * inplace=True): # <<<<<<<<<<<<<< * # Backprop the SELU transformation * cdef size_t size = delta_.size */ values[3] = ((PyObject *)((PyObject *)Py_True)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_delta)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_selu", 0, 3, 4, 1); __PYX_ERR(0, 556, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_signal_in)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_selu", 0, 3, 4, 2); __PYX_ERR(0, 556, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, 
__pyx_n_s_inplace); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_selu") < 0)) __PYX_ERR(0, 556, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_delta_ = ((PyArrayObject *)values[1]); __pyx_v_signal_in_ = ((PyArrayObject *)values[2]); __pyx_v_inplace = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_selu", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 556, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_selu", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_delta_), __pyx_ptype_5numpy_ndarray, 1, "delta_", 0))) __PYX_ERR(0, 556, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_signal_in_), __pyx_ptype_5numpy_ndarray, 1, "signal_in_", 0))) __PYX_ERR(0, 556, __pyx_L1_error) __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_16backprop_selu(__pyx_self, __pyx_v_self, __pyx_v_delta_, __pyx_v_signal_in_, __pyx_v_inplace); /* "thinc/neural/ops.pyx":556 * data[i] *= scale * * def backprop_selu(self, ndarray delta_, ndarray signal_in_, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the SELU transformation */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_16backprop_selu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_delta_, PyArrayObject *__pyx_v_signal_in_, CYTHON_UNUSED PyObject *__pyx_v_inplace) { size_t __pyx_v_size; __pyx_t_5thinc_8typedefs_weight_t *__pyx_v_delta; __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_signal_in; float __pyx_v_scale; float __pyx_v_alpha; size_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; size_t __pyx_t_2; size_t __pyx_t_3; size_t __pyx_t_4; size_t __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__67) __Pyx_RefNannySetupContext("backprop_selu", 0); __Pyx_TraceCall("backprop_selu", __pyx_f[0], 556, 0, __PYX_ERR(0, 556, __pyx_L1_error)); /* "thinc/neural/ops.pyx":559 * inplace=True): * # Backprop the SELU transformation * cdef size_t size = delta_.size # <<<<<<<<<<<<<< * cdef weight_t* delta = delta_.data * cdef const weight_t* signal_in = signal_in_.data */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_delta_), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 559, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_size_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 559, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_size = __pyx_t_2; /* "thinc/neural/ops.pyx":560 * # Backprop the SELU transformation * cdef size_t size = delta_.size * cdef weight_t* delta = delta_.data # <<<<<<<<<<<<<< * cdef const weight_t* signal_in = 
signal_in_.data * cdef float scale = 1.0507009873554805 */ __pyx_v_delta = ((__pyx_t_5thinc_8typedefs_weight_t *)__pyx_v_delta_->data); /* "thinc/neural/ops.pyx":561 * cdef size_t size = delta_.size * cdef weight_t* delta = delta_.data * cdef const weight_t* signal_in = signal_in_.data # <<<<<<<<<<<<<< * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 */ __pyx_v_signal_in = ((__pyx_t_5thinc_8typedefs_weight_t const *)__pyx_v_signal_in_->data); /* "thinc/neural/ops.pyx":562 * cdef weight_t* delta = delta_.data * cdef const weight_t* signal_in = signal_in_.data * cdef float scale = 1.0507009873554805 # <<<<<<<<<<<<<< * cdef float alpha = 1.6732632423543772 * */ __pyx_v_scale = 1.0507009873554805; /* "thinc/neural/ops.pyx":563 * cdef const weight_t* signal_in = signal_in_.data * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 # <<<<<<<<<<<<<< * * for i in range(size): */ __pyx_v_alpha = 1.6732632423543772; /* "thinc/neural/ops.pyx":565 * cdef float alpha = 1.6732632423543772 * * for i in range(size): # <<<<<<<<<<<<<< * delta[i] *= scale * if signal_in[i] <= 0: */ __pyx_t_2 = __pyx_v_size; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "thinc/neural/ops.pyx":566 * * for i in range(size): * delta[i] *= scale # <<<<<<<<<<<<<< * if signal_in[i] <= 0: * delta[i] *= alpha * expf(signal_in[i]) */ __pyx_t_5 = __pyx_v_i; (__pyx_v_delta[__pyx_t_5]) = ((__pyx_v_delta[__pyx_t_5]) * __pyx_v_scale); /* "thinc/neural/ops.pyx":567 * for i in range(size): * delta[i] *= scale * if signal_in[i] <= 0: # <<<<<<<<<<<<<< * delta[i] *= alpha * expf(signal_in[i]) * */ __pyx_t_6 = (((__pyx_v_signal_in[__pyx_v_i]) <= 0.0) != 0); if (__pyx_t_6) { /* "thinc/neural/ops.pyx":568 * delta[i] *= scale * if signal_in[i] <= 0: * delta[i] *= alpha * expf(signal_in[i]) # <<<<<<<<<<<<<< * * def backprop_elu(self, ndarray delta_, ndarray signal_out_, */ __pyx_t_5 = __pyx_v_i; (__pyx_v_delta[__pyx_t_5]) = ((__pyx_v_delta[__pyx_t_5]) * (__pyx_v_alpha * expf((__pyx_v_signal_in[__pyx_v_i])))); /* "thinc/neural/ops.pyx":567 * for i in range(size): * delta[i] *= scale * if signal_in[i] <= 0: # <<<<<<<<<<<<<< * delta[i] *= alpha * expf(signal_in[i]) * */ } } /* "thinc/neural/ops.pyx":556 * data[i] *= scale * * def backprop_selu(self, ndarray delta_, ndarray signal_in_, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the SELU transformation */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_selu", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":570 * delta[i] *= alpha * expf(signal_in[i]) * * def backprop_elu(self, ndarray delta_, ndarray signal_out_, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the ELU transformation */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_19backprop_elu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_19backprop_elu = {"backprop_elu", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_19backprop_elu, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_19backprop_elu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject 
*__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyArrayObject *__pyx_v_delta_ = 0; PyArrayObject *__pyx_v_signal_out_ = 0; CYTHON_UNUSED PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_elu (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_delta,&__pyx_n_s_signal_out,&__pyx_n_s_inplace,0}; PyObject* values[4] = {0,0,0,0}; /* "thinc/neural/ops.pyx":571 * * def backprop_elu(self, ndarray delta_, ndarray signal_out_, * inplace=True): # <<<<<<<<<<<<<< * # Backprop the ELU transformation * # Note that this is over the function _output_, not the function */ values[3] = ((PyObject *)((PyObject *)Py_True)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_delta)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_elu", 0, 3, 4, 1); __PYX_ERR(0, 570, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_signal_out)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_elu", 0, 3, 4, 2); __PYX_ERR(0, 570, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_elu") < 0)) __PYX_ERR(0, 570, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_delta_ = ((PyArrayObject *)values[1]); __pyx_v_signal_out_ = ((PyArrayObject *)values[2]); __pyx_v_inplace = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_elu", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 570, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_elu", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_delta_), __pyx_ptype_5numpy_ndarray, 1, "delta_", 0))) __PYX_ERR(0, 570, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_signal_out_), __pyx_ptype_5numpy_ndarray, 1, "signal_out_", 0))) __PYX_ERR(0, 570, __pyx_L1_error) __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_18backprop_elu(__pyx_self, __pyx_v_self, __pyx_v_delta_, __pyx_v_signal_out_, 
__pyx_v_inplace); /* "thinc/neural/ops.pyx":570 * delta[i] *= alpha * expf(signal_in[i]) * * def backprop_elu(self, ndarray delta_, ndarray signal_out_, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the ELU transformation */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_18backprop_elu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_delta_, PyArrayObject *__pyx_v_signal_out_, CYTHON_UNUSED PyObject *__pyx_v_inplace) { size_t __pyx_v_size; __pyx_t_5thinc_8typedefs_weight_t *__pyx_v_delta; __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_signal_out; size_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; size_t __pyx_t_2; size_t __pyx_t_3; size_t __pyx_t_4; int __pyx_t_5; size_t __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__68) __Pyx_RefNannySetupContext("backprop_elu", 0); __Pyx_TraceCall("backprop_elu", __pyx_f[0], 570, 0, __PYX_ERR(0, 570, __pyx_L1_error)); /* "thinc/neural/ops.pyx":575 * # Note that this is over the function _output_, not the function * # _input_! * cdef size_t size = delta_.size # <<<<<<<<<<<<<< * cdef weight_t* delta = delta_.data * cdef const weight_t* signal_out = signal_out_.data */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_delta_), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_size_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 575, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_size = __pyx_t_2; /* "thinc/neural/ops.pyx":576 * # _input_! * cdef size_t size = delta_.size * cdef weight_t* delta = delta_.data # <<<<<<<<<<<<<< * cdef const weight_t* signal_out = signal_out_.data * for i in range(size): */ __pyx_v_delta = ((__pyx_t_5thinc_8typedefs_weight_t *)__pyx_v_delta_->data); /* "thinc/neural/ops.pyx":577 * cdef size_t size = delta_.size * cdef weight_t* delta = delta_.data * cdef const weight_t* signal_out = signal_out_.data # <<<<<<<<<<<<<< * for i in range(size): * if signal_out[i] <= 0: */ __pyx_v_signal_out = ((__pyx_t_5thinc_8typedefs_weight_t const *)__pyx_v_signal_out_->data); /* "thinc/neural/ops.pyx":578 * cdef weight_t* delta = delta_.data * cdef const weight_t* signal_out = signal_out_.data * for i in range(size): # <<<<<<<<<<<<<< * if signal_out[i] <= 0: * delta[i] *= signal_out[i] + 1. */ __pyx_t_2 = __pyx_v_size; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "thinc/neural/ops.pyx":579 * cdef const weight_t* signal_out = signal_out_.data * for i in range(size): * if signal_out[i] <= 0: # <<<<<<<<<<<<<< * delta[i] *= signal_out[i] + 1. * */ __pyx_t_5 = (((__pyx_v_signal_out[__pyx_v_i]) <= 0.0) != 0); if (__pyx_t_5) { /* "thinc/neural/ops.pyx":580 * for i in range(size): * if signal_out[i] <= 0: * delta[i] *= signal_out[i] + 1. # <<<<<<<<<<<<<< * * def relu(self, ndarray X, inplace=False): */ __pyx_t_6 = __pyx_v_i; (__pyx_v_delta[__pyx_t_6]) = ((__pyx_v_delta[__pyx_t_6]) * ((__pyx_v_signal_out[__pyx_v_i]) + 1.)); /* "thinc/neural/ops.pyx":579 * cdef const weight_t* signal_out = signal_out_.data * for i in range(size): * if signal_out[i] <= 0: # <<<<<<<<<<<<<< * delta[i] *= signal_out[i] + 1. 
* */ } } /* "thinc/neural/ops.pyx":570 * delta[i] *= alpha * expf(signal_in[i]) * * def backprop_elu(self, ndarray delta_, ndarray signal_out_, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the ELU transformation */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_elu", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":582 * delta[i] *= signal_out[i] + 1. * * def relu(self, ndarray X, inplace=False): # <<<<<<<<<<<<<< * cdef np.ndarray out = X if inplace else X.copy() * cdef weight_t* data = out.data */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_21relu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_21relu = {"relu", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_21relu, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_21relu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyArrayObject *__pyx_v_X = 0; PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("relu (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_inplace,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_False)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("relu", 0, 2, 3, 1); __PYX_ERR(0, 582, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "relu") < 0)) __PYX_ERR(0, 582, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = ((PyArrayObject *)values[1]); __pyx_v_inplace = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("relu", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 582, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.relu", __pyx_clineno, __pyx_lineno, __pyx_filename); 
__Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) __PYX_ERR(0, 582, __pyx_L1_error) __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_20relu(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_inplace); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_20relu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_X, PyObject *__pyx_v_inplace) { PyArrayObject *__pyx_v_out = 0; __pyx_t_5thinc_8typedefs_weight_t *__pyx_v_data; size_t __pyx_v_size; size_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; size_t __pyx_t_7; size_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__69) __Pyx_RefNannySetupContext("relu", 0); __Pyx_TraceCall("relu", __pyx_f[0], 582, 0, __PYX_ERR(0, 582, __pyx_L1_error)); /* "thinc/neural/ops.pyx":583 * * def relu(self, ndarray X, inplace=False): * cdef np.ndarray out = X if inplace else X.copy() # <<<<<<<<<<<<<< * cdef weight_t* data = out.data * cdef size_t size = out.size */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 583, __pyx_L1_error) if (__pyx_t_2) { __Pyx_INCREF(((PyObject *)__pyx_v_X)); __pyx_t_1 = ((PyObject *)__pyx_v_X); } else { __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_X), __pyx_n_s_copy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_3 = (__pyx_t_5) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 583, __pyx_L1_error) __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } __pyx_v_out = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":584 * def relu(self, ndarray X, inplace=False): * cdef np.ndarray out = X if inplace else X.copy() * cdef weight_t* data = out.data # <<<<<<<<<<<<<< * cdef size_t size = out.size * for i in range(size): */ __pyx_v_data = ((__pyx_t_5thinc_8typedefs_weight_t *)__pyx_v_out->data); /* "thinc/neural/ops.pyx":585 * cdef np.ndarray out = X if inplace else X.copy() * cdef weight_t* data = out.data * cdef size_t size = out.size # <<<<<<<<<<<<<< * for i in range(size): * if data[i] < 0: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_out), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 585, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = __Pyx_PyInt_As_size_t(__pyx_t_1); if (unlikely((__pyx_t_6 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 585, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_size = __pyx_t_6; /* "thinc/neural/ops.pyx":586 * cdef weight_t* data = out.data * cdef size_t size = out.size * for i in range(size): # <<<<<<<<<<<<<< * if data[i] < 0: * data[i] = 0. */ __pyx_t_6 = __pyx_v_size; __pyx_t_7 = __pyx_t_6; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "thinc/neural/ops.pyx":587 * cdef size_t size = out.size * for i in range(size): * if data[i] < 0: # <<<<<<<<<<<<<< * data[i] = 0. * return out */ __pyx_t_2 = (((__pyx_v_data[__pyx_v_i]) < 0.0) != 0); if (__pyx_t_2) { /* "thinc/neural/ops.pyx":588 * for i in range(size): * if data[i] < 0: * data[i] = 0. # <<<<<<<<<<<<<< * return out * */ (__pyx_v_data[__pyx_v_i]) = 0.; /* "thinc/neural/ops.pyx":587 * cdef size_t size = out.size * for i in range(size): * if data[i] < 0: # <<<<<<<<<<<<<< * data[i] = 0. * return out */ } } /* "thinc/neural/ops.pyx":589 * if data[i] < 0: * data[i] = 0. * return out # <<<<<<<<<<<<<< * * def backprop_relu(self, ndarray dY, ndarray Y, inplace=False): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_out)); __pyx_r = ((PyObject *)__pyx_v_out); goto __pyx_L0; /* "thinc/neural/ops.pyx":582 * delta[i] *= signal_out[i] + 1. 
* * def relu(self, ndarray X, inplace=False): # <<<<<<<<<<<<<< * cdef np.ndarray out = X if inplace else X.copy() * cdef weight_t* data = out.data */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.relu", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":591 * return out * * def backprop_relu(self, ndarray dY, ndarray Y, inplace=False): # <<<<<<<<<<<<<< * cdef np.ndarray dX = dY if inplace else dY.copy() * cdef size_t size = dX.size */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_23backprop_relu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_23backprop_relu = {"backprop_relu", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_23backprop_relu, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_23backprop_relu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyArrayObject *__pyx_v_dY = 0; PyArrayObject *__pyx_v_Y = 0; PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_relu (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_dY,&__pyx_n_s_Y,&__pyx_n_s_inplace,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_False)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_relu", 0, 3, 4, 1); __PYX_ERR(0, 591, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_relu", 0, 3, 4, 2); __PYX_ERR(0, 591, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_relu") < 0)) __PYX_ERR(0, 591, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); 
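/* Illustrative note (not part of the generated output): relu() above clamps negative
 * entries of `out` to zero in a plain C loop over out.data, per the embedded ops.pyx
 * source. A rough NumPy equivalent (hypothetical, for orientation only):
 *
 *     out = X if inplace else X.copy()
 *     out[out < 0] = 0.0
 *     return out
 */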
break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_dY = ((PyArrayObject *)values[1]); __pyx_v_Y = ((PyArrayObject *)values[2]); __pyx_v_inplace = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_relu", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 591, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_relu", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_dY), __pyx_ptype_5numpy_ndarray, 1, "dY", 0))) __PYX_ERR(0, 591, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y), __pyx_ptype_5numpy_ndarray, 1, "Y", 0))) __PYX_ERR(0, 591, __pyx_L1_error) __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_22backprop_relu(__pyx_self, __pyx_v_self, __pyx_v_dY, __pyx_v_Y, __pyx_v_inplace); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_22backprop_relu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_dY, PyArrayObject *__pyx_v_Y, PyObject *__pyx_v_inplace) { PyArrayObject *__pyx_v_dX = 0; size_t __pyx_v_size; __pyx_t_5thinc_8typedefs_weight_t *__pyx_v_dX_ptr; __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_Y_ptr; size_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; size_t __pyx_t_7; size_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__70) __Pyx_RefNannySetupContext("backprop_relu", 0); __Pyx_TraceCall("backprop_relu", __pyx_f[0], 591, 0, __PYX_ERR(0, 591, __pyx_L1_error)); /* "thinc/neural/ops.pyx":592 * * def backprop_relu(self, ndarray dY, ndarray Y, inplace=False): * cdef np.ndarray dX = dY if inplace else dY.copy() # <<<<<<<<<<<<<< * cdef size_t size = dX.size * cdef weight_t* dX_ptr = dX.data */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 592, __pyx_L1_error) if (__pyx_t_2) { __Pyx_INCREF(((PyObject *)__pyx_v_dY)); __pyx_t_1 = ((PyObject *)__pyx_v_dY); } else { __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_dY), __pyx_n_s_copy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 592, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_3 = (__pyx_t_5) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 592, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 592, __pyx_L1_error) __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } __pyx_v_dX = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":593 * def backprop_relu(self, ndarray dY, ndarray Y, inplace=False): * cdef np.ndarray dX = dY if inplace else dY.copy() * cdef size_t size = dX.size # <<<<<<<<<<<<<< * cdef weight_t* dX_ptr = dX.data * cdef const weight_t* Y_ptr = Y.data */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_dX), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = __Pyx_PyInt_As_size_t(__pyx_t_1); if (unlikely((__pyx_t_6 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 593, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_size = __pyx_t_6; /* "thinc/neural/ops.pyx":594 * cdef np.ndarray dX = dY if inplace else dY.copy() * cdef size_t size = dX.size * cdef weight_t* dX_ptr = dX.data # <<<<<<<<<<<<<< * cdef const weight_t* Y_ptr = Y.data * for i in range(size): */ __pyx_v_dX_ptr = ((__pyx_t_5thinc_8typedefs_weight_t *)__pyx_v_dX->data); /* "thinc/neural/ops.pyx":595 * cdef size_t size = dX.size * cdef weight_t* dX_ptr = dX.data * cdef const weight_t* Y_ptr = Y.data # <<<<<<<<<<<<<< * for i in range(size): * if Y_ptr[i] <= 0: */ __pyx_v_Y_ptr = ((__pyx_t_5thinc_8typedefs_weight_t const *)__pyx_v_Y->data); /* "thinc/neural/ops.pyx":596 * cdef weight_t* dX_ptr = dX.data * cdef const weight_t* Y_ptr = Y.data * for i in range(size): # <<<<<<<<<<<<<< * if Y_ptr[i] <= 0: * dX_ptr[i] = 0. */ __pyx_t_6 = __pyx_v_size; __pyx_t_7 = __pyx_t_6; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "thinc/neural/ops.pyx":597 * cdef const weight_t* Y_ptr = Y.data * for i in range(size): * if Y_ptr[i] <= 0: # <<<<<<<<<<<<<< * dX_ptr[i] = 0. * return dX */ __pyx_t_2 = (((__pyx_v_Y_ptr[__pyx_v_i]) <= 0.0) != 0); if (__pyx_t_2) { /* "thinc/neural/ops.pyx":598 * for i in range(size): * if Y_ptr[i] <= 0: * dX_ptr[i] = 0. # <<<<<<<<<<<<<< * return dX * */ (__pyx_v_dX_ptr[__pyx_v_i]) = 0.; /* "thinc/neural/ops.pyx":597 * cdef const weight_t* Y_ptr = Y.data * for i in range(size): * if Y_ptr[i] <= 0: # <<<<<<<<<<<<<< * dX_ptr[i] = 0. * return dX */ } } /* "thinc/neural/ops.pyx":599 * if Y_ptr[i] <= 0: * dX_ptr[i] = 0. 
* return dX # <<<<<<<<<<<<<< * * def maxout(self, const float[:, :, ::1] py_cands): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_dX)); __pyx_r = ((PyObject *)__pyx_v_dX); goto __pyx_L0; /* "thinc/neural/ops.pyx":591 * return out * * def backprop_relu(self, ndarray dY, ndarray Y, inplace=False): # <<<<<<<<<<<<<< * cdef np.ndarray dX = dY if inplace else dY.copy() * cdef size_t size = dX.size */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_relu", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_dX); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":601 * return dX * * def maxout(self, const float[:, :, ::1] py_cands): # <<<<<<<<<<<<<< * cdef Pool mem = Pool() * cdef int B = py_cands.shape[0] */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_25maxout(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_25maxout = {"maxout", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_25maxout, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_25maxout(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_py_cands = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("maxout (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_py_cands,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_py_cands)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maxout", 1, 2, 2, 1); __PYX_ERR(0, 601, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maxout") < 0)) __PYX_ERR(0, 601, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_py_cands = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_py_cands.memview)) __PYX_ERR(0, 601, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("maxout", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 601, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.maxout", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; 
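/* Illustrative note (not part of the generated output): backprop_relu() above zeroes
 * gradient entries wherever the forward output Y was non-positive, per the embedded
 * ops.pyx source. A rough NumPy equivalent (hypothetical, for orientation only):
 *
 *     dX = dY if inplace else dY.copy()
 *     dX[Y <= 0] = 0.0
 *     return dX
 */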
__pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_24maxout(__pyx_self, __pyx_v_self, __pyx_v_py_cands); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_24maxout(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_py_cands) { CYTHON_UNUSED struct __pyx_obj_5cymem_5cymem_Pool *__pyx_v_mem = 0; int __pyx_v_B; int __pyx_v_O; int __pyx_v_P; PyArrayObject *__pyx_v_best = 0; PyArrayObject *__pyx_v_which = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__71) __Pyx_RefNannySetupContext("maxout", 0); __Pyx_TraceCall("maxout", __pyx_f[0], 601, 0, __PYX_ERR(0, 601, __pyx_L1_error)); /* "thinc/neural/ops.pyx":602 * * def maxout(self, const float[:, :, ::1] py_cands): * cdef Pool mem = Pool() # <<<<<<<<<<<<<< * cdef int B = py_cands.shape[0] * cdef int O = py_cands.shape[1] */ __pyx_t_1 = __Pyx_PyObject_CallNoArg(((PyObject *)__pyx_ptype_5cymem_5cymem_Pool)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_mem = ((struct __pyx_obj_5cymem_5cymem_Pool *)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":603 * def maxout(self, const float[:, :, ::1] py_cands): * cdef Pool mem = Pool() * cdef int B = py_cands.shape[0] # <<<<<<<<<<<<<< * cdef int O = py_cands.shape[1] * cdef int P = py_cands.shape[2] */ __pyx_v_B = (__pyx_v_py_cands.shape[0]); /* "thinc/neural/ops.pyx":604 * cdef Pool mem = Pool() * cdef int B = py_cands.shape[0] * cdef int O = py_cands.shape[1] # <<<<<<<<<<<<<< * cdef int P = py_cands.shape[2] * */ __pyx_v_O = (__pyx_v_py_cands.shape[1]); /* "thinc/neural/ops.pyx":605 * cdef int B = py_cands.shape[0] * cdef int O = py_cands.shape[1] * cdef int P = py_cands.shape[2] # <<<<<<<<<<<<<< * * cdef ndarray best = numpy.zeros((B, O), dtype='float32', order='C') */ __pyx_v_P = (__pyx_v_py_cands.shape[2]); /* "thinc/neural/ops.pyx":607 * cdef int P = py_cands.shape[2] * * cdef ndarray best = numpy.zeros((B, O), dtype='float32', order='C') # <<<<<<<<<<<<<< * cdef ndarray which = numpy.zeros((B, O), dtype='int32', order='C') * cpu_maxout(best.data, which.data, */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_O); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); 
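/* The calls above and below assemble the positional tuple ((B, O),) and the keyword
 * dict {'dtype': 'float32', 'order': 'C'} for numpy.zeros; the result is type-checked
 * against numpy.ndarray and bound to `best`. The same pattern is repeated next for the
 * int32 `which` array. */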
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_n_s_float32) < 0) __PYX_ERR(0, 607, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_order, __pyx_n_s_C) < 0) __PYX_ERR(0, 607, __pyx_L1_error) __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 607, __pyx_L1_error) __pyx_v_best = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":608 * * cdef ndarray best = numpy.zeros((B, O), dtype='float32', order='C') * cdef ndarray which = numpy.zeros((B, O), dtype='int32', order='C') # <<<<<<<<<<<<<< * cpu_maxout(best.data, which.data, * &py_cands[0, 0, 0], B, O, P) */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_O); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_n_s_int32) < 0) __PYX_ERR(0, 608, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_order, __pyx_n_s_C) < 0) __PYX_ERR(0, 608, __pyx_L1_error) __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 608, __pyx_L1_error) __pyx_v_which = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":610 * cdef ndarray which = numpy.zeros((B, O), dtype='int32', order='C') * cpu_maxout(best.data, which.data, * &py_cands[0, 0, 0], B, O, P) # <<<<<<<<<<<<<< * return best, which * */ __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = -1; if (__pyx_t_5 < 0) { __pyx_t_5 += __pyx_v_py_cands.shape[0]; if (unlikely(__pyx_t_5 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_5 >= __pyx_v_py_cands.shape[0])) __pyx_t_8 = 0; if (__pyx_t_6 < 0) { __pyx_t_6 += __pyx_v_py_cands.shape[1]; if (unlikely(__pyx_t_6 < 0)) __pyx_t_8 = 1; } else if 
(unlikely(__pyx_t_6 >= __pyx_v_py_cands.shape[1])) __pyx_t_8 = 1; if (__pyx_t_7 < 0) { __pyx_t_7 += __pyx_v_py_cands.shape[2]; if (unlikely(__pyx_t_7 < 0)) __pyx_t_8 = 2; } else if (unlikely(__pyx_t_7 >= __pyx_v_py_cands.shape[2])) __pyx_t_8 = 2; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); __PYX_ERR(0, 610, __pyx_L1_error) } /* "thinc/neural/ops.pyx":609 * cdef ndarray best = numpy.zeros((B, O), dtype='float32', order='C') * cdef ndarray which = numpy.zeros((B, O), dtype='int32', order='C') * cpu_maxout(best.data, which.data, # <<<<<<<<<<<<<< * &py_cands[0, 0, 0], B, O, P) * return best, which */ __pyx_f_5thinc_6neural_3ops_cpu_maxout(((float *)__pyx_v_best->data), ((int *)__pyx_v_which->data), (&(*((float const *) ( /* dim=2 */ ((char *) (((float const *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_py_cands.data + __pyx_t_5 * __pyx_v_py_cands.strides[0]) ) + __pyx_t_6 * __pyx_v_py_cands.strides[1]) )) + __pyx_t_7)) )))), __pyx_v_B, __pyx_v_O, __pyx_v_P); /* "thinc/neural/ops.pyx":611 * cpu_maxout(best.data, which.data, * &py_cands[0, 0, 0], B, O, P) * return best, which # <<<<<<<<<<<<<< * * def backprop_maxout(self, const float[:, ::1] dX__bo, int[:, ::1] which__bo, int P): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 611, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_best)); __Pyx_GIVEREF(((PyObject *)__pyx_v_best)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_best)); __Pyx_INCREF(((PyObject *)__pyx_v_which)); __Pyx_GIVEREF(((PyObject *)__pyx_v_which)); PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_v_which)); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":601 * return dX * * def maxout(self, const float[:, :, ::1] py_cands): # <<<<<<<<<<<<<< * cdef Pool mem = Pool() * cdef int B = py_cands.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.maxout", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_mem); __Pyx_XDECREF((PyObject *)__pyx_v_best); __Pyx_XDECREF((PyObject *)__pyx_v_which); __PYX_XDEC_MEMVIEW(&__pyx_v_py_cands, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":613 * return best, which * * def backprop_maxout(self, const float[:, ::1] dX__bo, int[:, ::1] which__bo, int P): # <<<<<<<<<<<<<< * cdef int B = dX__bo.shape[0] * cdef int O = dX__bo.shape[1] */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_27backprop_maxout(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_27backprop_maxout = {"backprop_maxout", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_27backprop_maxout, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_27backprop_maxout(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_dX__bo = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_which__bo = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_P; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
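/* Illustrative note (not part of the generated output): maxout() above reduces a
 * (B, O, P) candidate tensor to a (B, O) float32 `best` array and a (B, O) int32
 * `which` array via cpu_maxout. Assuming cpu_maxout takes the maximum over the last
 * (piece) axis, a rough sketch in NumPy (assuming `import numpy as np`; hypothetical,
 * for orientation only):
 *
 *     best = cands.max(axis=-1).astype('float32')
 *     which = cands.argmax(axis=-1).astype('int32')
 *     return best, which
 */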
__Pyx_RefNannySetupContext("backprop_maxout (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_dX__bo,&__pyx_n_s_which__bo,&__pyx_n_s_P,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dX__bo)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_maxout", 1, 4, 4, 1); __PYX_ERR(0, 613, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_which__bo)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_maxout", 1, 4, 4, 2); __PYX_ERR(0, 613, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_P)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_maxout", 1, 4, 4, 3); __PYX_ERR(0, 613, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_maxout") < 0)) __PYX_ERR(0, 613, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_self = values[0]; __pyx_v_dX__bo = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_dX__bo.memview)) __PYX_ERR(0, 613, __pyx_L3_error) __pyx_v_which__bo = __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_which__bo.memview)) __PYX_ERR(0, 613, __pyx_L3_error) __pyx_v_P = __Pyx_PyInt_As_int(values[3]); if (unlikely((__pyx_v_P == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 613, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_maxout", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 613, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_maxout", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_26backprop_maxout(__pyx_self, __pyx_v_self, __pyx_v_dX__bo, __pyx_v_which__bo, __pyx_v_P); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_26backprop_maxout(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_dX__bo, __Pyx_memviewslice __pyx_v_which__bo, int __pyx_v_P) { int __pyx_v_B; int __pyx_v_O; PyArrayObject *__pyx_v_dX__bop = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject 
*__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__72) __Pyx_RefNannySetupContext("backprop_maxout", 0); __Pyx_TraceCall("backprop_maxout", __pyx_f[0], 613, 0, __PYX_ERR(0, 613, __pyx_L1_error)); /* "thinc/neural/ops.pyx":614 * * def backprop_maxout(self, const float[:, ::1] dX__bo, int[:, ::1] which__bo, int P): * cdef int B = dX__bo.shape[0] # <<<<<<<<<<<<<< * cdef int O = dX__bo.shape[1] * */ __pyx_v_B = (__pyx_v_dX__bo.shape[0]); /* "thinc/neural/ops.pyx":615 * def backprop_maxout(self, const float[:, ::1] dX__bo, int[:, ::1] which__bo, int P): * cdef int B = dX__bo.shape[0] * cdef int O = dX__bo.shape[1] # <<<<<<<<<<<<<< * * cdef np.ndarray dX__bop = numpy.zeros((B, O, P), dtype='float32') */ __pyx_v_O = (__pyx_v_dX__bo.shape[1]); /* "thinc/neural/ops.pyx":617 * cdef int O = dX__bo.shape[1] * * cdef np.ndarray dX__bop = numpy.zeros((B, O, P), dtype='float32') # <<<<<<<<<<<<<< * cpu_backprop_maxout(dX__bop.data, * &dX__bo[0, 0], &which__bo[0, 0], B, O, P) */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_O); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_P); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_4); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_n_s_float32) < 0) __PYX_ERR(0, 617, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 617, __pyx_L1_error) __pyx_v_dX__bop = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":619 * cdef np.ndarray dX__bop = numpy.zeros((B, O, P), dtype='float32') * cpu_backprop_maxout(dX__bop.data, * &dX__bo[0, 0], &which__bo[0, 0], B, O, P) # <<<<<<<<<<<<<< * return dX__bop * */ __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = -1; if (__pyx_t_6 < 0) { __pyx_t_6 += __pyx_v_dX__bo.shape[0]; if (unlikely(__pyx_t_6 
< 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_6 >= __pyx_v_dX__bo.shape[0])) __pyx_t_8 = 0; if (__pyx_t_7 < 0) { __pyx_t_7 += __pyx_v_dX__bo.shape[1]; if (unlikely(__pyx_t_7 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_7 >= __pyx_v_dX__bo.shape[1])) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); __PYX_ERR(0, 619, __pyx_L1_error) } __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_8 = -1; if (__pyx_t_9 < 0) { __pyx_t_9 += __pyx_v_which__bo.shape[0]; if (unlikely(__pyx_t_9 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_9 >= __pyx_v_which__bo.shape[0])) __pyx_t_8 = 0; if (__pyx_t_10 < 0) { __pyx_t_10 += __pyx_v_which__bo.shape[1]; if (unlikely(__pyx_t_10 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_10 >= __pyx_v_which__bo.shape[1])) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); __PYX_ERR(0, 619, __pyx_L1_error) } /* "thinc/neural/ops.pyx":618 * * cdef np.ndarray dX__bop = numpy.zeros((B, O, P), dtype='float32') * cpu_backprop_maxout(dX__bop.data, # <<<<<<<<<<<<<< * &dX__bo[0, 0], &which__bo[0, 0], B, O, P) * return dX__bop */ __pyx_f_5thinc_6neural_3ops_cpu_backprop_maxout(((float *)__pyx_v_dX__bop->data), (&(*((float const *) ( /* dim=1 */ ((char *) (((float const *) ( /* dim=0 */ (__pyx_v_dX__bo.data + __pyx_t_6 * __pyx_v_dX__bo.strides[0]) )) + __pyx_t_7)) )))), (&(*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_which__bo.data + __pyx_t_9 * __pyx_v_which__bo.strides[0]) )) + __pyx_t_10)) )))), __pyx_v_B, __pyx_v_O, __pyx_v_P); /* "thinc/neural/ops.pyx":620 * cpu_backprop_maxout(dX__bop.data, * &dX__bo[0, 0], &which__bo[0, 0], B, O, P) * return dX__bop # <<<<<<<<<<<<<< * * def mish(self, const float[:, ::1] X, threshold=5, out=None): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_dX__bop)); __pyx_r = ((PyObject *)__pyx_v_dX__bop); goto __pyx_L0; /* "thinc/neural/ops.pyx":613 * return best, which * * def backprop_maxout(self, const float[:, ::1] dX__bo, int[:, ::1] which__bo, int P): # <<<<<<<<<<<<<< * cdef int B = dX__bo.shape[0] * cdef int O = dX__bo.shape[1] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_maxout", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_dX__bop); __PYX_XDEC_MEMVIEW(&__pyx_v_dX__bo, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_which__bo, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":622 * return dX__bop * * def mish(self, const float[:, ::1] X, threshold=5, out=None): # <<<<<<<<<<<<<< * shape = [X.shape[i] for i in range(X.ndim)] * cdef np.ndarray Y = self.allocate(tuple(shape), dtype="f") */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_29mish(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_29mish = {"mish", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_29mish, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_29mish(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_v_threshold = 0; PyObject *__pyx_v_out = 0; int 
__pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mish (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_threshold,&__pyx_n_s_out,0}; PyObject* values[4] = {0,0,0,0}; values[2] = ((PyObject *)((PyObject *)__pyx_int_5)); values[3] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mish", 0, 2, 4, 1); __PYX_ERR(0, 622, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mish") < 0)) __PYX_ERR(0, 622, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 622, __pyx_L3_error) __pyx_v_threshold = values[2]; __pyx_v_out = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("mish", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 622, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.mish", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_28mish(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_threshold, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_28mish(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out) { PyObject *__pyx_v_shape = NULL; PyArrayObject *__pyx_v_Y = 0; PyObject *__pyx_v_i = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t __pyx_t_4; PyObject *(*__pyx_t_5)(PyObject *); Py_ssize_t __pyx_t_6; PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; Py_ssize_t 
__pyx_t_9; int __pyx_t_10; float __pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__73) __Pyx_RefNannySetupContext("mish", 0); __Pyx_TraceCall("mish", __pyx_f[0], 622, 0, __PYX_ERR(0, 622, __pyx_L1_error)); /* "thinc/neural/ops.pyx":623 * * def mish(self, const float[:, ::1] X, threshold=5, out=None): * shape = [X.shape[i] for i in range(X.ndim)] # <<<<<<<<<<<<<< * cdef np.ndarray Y = self.allocate(tuple(shape), dtype="f") * cpu_mish(Y.data, */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_X, 2, (PyObject *(*)(char *)) __pyx_memview_get_float__const__, (int (*)(char *, PyObject *)) NULL, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (likely(PyList_CheckExact(__pyx_t_2)) || PyTuple_CheckExact(__pyx_t_2)) { __pyx_t_3 = __pyx_t_2; __Pyx_INCREF(__pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = NULL; } else { __pyx_t_4 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 623, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; for (;;) { if (likely(!__pyx_t_5)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 623, __pyx_L1_error) #else __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } else { if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 623, __pyx_L1_error) #else __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } } else { __pyx_t_2 = __pyx_t_5(__pyx_t_3); if (unlikely(!__pyx_t_2)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 623, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_2); } __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 623, __pyx_L1_error) __pyx_t_2 = PyInt_FromSsize_t((__pyx_v_X.shape[__pyx_t_6])); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_2))) __PYX_ERR(0, 623, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_shape = ((PyObject*)__pyx_t_1); 
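/* Illustrative note (not part of the generated output): the iteration above unrolls the
 * ops.pyx comprehension `shape = [X.shape[i] for i in range(X.ndim)]`; the remainder of
 * mish() allocates Y with that shape and calls cpu_mish on X's buffer. Assuming cpu_mish
 * computes the Mish activation x * tanh(softplus(x)) with a pass-through for x >= threshold,
 * a rough NumPy sketch (assuming `import numpy as np`; hypothetical, for orientation only):
 *
 *     Y = np.where(X >= threshold, X, X * np.tanh(np.log1p(np.exp(X))))
 */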
__pyx_t_1 = 0; /* "thinc/neural/ops.pyx":624 * def mish(self, const float[:, ::1] X, threshold=5, out=None): * shape = [X.shape[i] for i in range(X.ndim)] * cdef np.ndarray Y = self.allocate(tuple(shape), dtype="f") # <<<<<<<<<<<<<< * cpu_mish(Y.data, * &X[0, 0], threshold, X.size) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 624, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyList_AsTuple(__pyx_v_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 624, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 624, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 624, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_n_s_f) < 0) __PYX_ERR(0, 624, __pyx_L1_error) __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 624, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_7) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_7, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 624, __pyx_L1_error) __pyx_v_Y = ((PyArrayObject *)__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":626 * cdef np.ndarray Y = self.allocate(tuple(shape), dtype="f") * cpu_mish(Y.data, * &X[0, 0], threshold, X.size) # <<<<<<<<<<<<<< * if out is not None: * out[:] = Y */ __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = -1; if (__pyx_t_8 < 0) { __pyx_t_8 += __pyx_v_X.shape[0]; if (unlikely(__pyx_t_8 < 0)) __pyx_t_10 = 0; } else if (unlikely(__pyx_t_8 >= __pyx_v_X.shape[0])) __pyx_t_10 = 0; if (__pyx_t_9 < 0) { __pyx_t_9 += __pyx_v_X.shape[1]; if (unlikely(__pyx_t_9 < 0)) __pyx_t_10 = 1; } else if (unlikely(__pyx_t_9 >= __pyx_v_X.shape[1])) __pyx_t_10 = 1; if (unlikely(__pyx_t_10 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_10); __PYX_ERR(0, 626, __pyx_L1_error) } __pyx_t_11 = __pyx_PyFloat_AsFloat(__pyx_v_threshold); if (unlikely((__pyx_t_11 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 626, __pyx_L1_error) __pyx_t_7 = __pyx_memoryview_fromslice(__pyx_v_X, 2, (PyObject *(*)(char *)) __pyx_memview_get_float__const__, (int (*)(char *, PyObject *)) NULL, 0);; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 626, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 626, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 626, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":625 * shape = [X.shape[i] for i in range(X.ndim)] * cdef np.ndarray Y = self.allocate(tuple(shape), dtype="f") * cpu_mish(Y.data, # <<<<<<<<<<<<<< * &X[0, 0], threshold, X.size) * if out is not None: */ __pyx_f_5thinc_6neural_3ops_cpu_mish(((float *)__pyx_v_Y->data), (&(*((float const *) ( /* dim=1 */ ((char *) (((float const *) ( /* dim=0 */ (__pyx_v_X.data + __pyx_t_8 * __pyx_v_X.strides[0]) )) + __pyx_t_9)) )))), __pyx_t_11, __pyx_t_10); /* "thinc/neural/ops.pyx":627 * cpu_mish(Y.data, * &X[0, 0], threshold, X.size) * if out is not None: # <<<<<<<<<<<<<< * out[:] = Y * return out */ __pyx_t_12 = (__pyx_v_out 
!= Py_None); __pyx_t_13 = (__pyx_t_12 != 0); if (__pyx_t_13) { /* "thinc/neural/ops.pyx":628 * &X[0, 0], threshold, X.size) * if out is not None: * out[:] = Y # <<<<<<<<<<<<<< * return out * else: */ if (__Pyx_PyObject_SetSlice(__pyx_v_out, ((PyObject *)__pyx_v_Y), 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 628, __pyx_L1_error) /* "thinc/neural/ops.pyx":629 * if out is not None: * out[:] = Y * return out # <<<<<<<<<<<<<< * else: * return Y */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_out); __pyx_r = __pyx_v_out; goto __pyx_L0; /* "thinc/neural/ops.pyx":627 * cpu_mish(Y.data, * &X[0, 0], threshold, X.size) * if out is not None: # <<<<<<<<<<<<<< * out[:] = Y * return out */ } /* "thinc/neural/ops.pyx":631 * return out * else: * return Y # <<<<<<<<<<<<<< * * def backprop_mish(self, const float[:, ::1] dY, const float[:, ::1] X, */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_Y)); __pyx_r = ((PyObject *)__pyx_v_Y); goto __pyx_L0; } /* "thinc/neural/ops.pyx":622 * return dX__bop * * def mish(self, const float[:, ::1] X, threshold=5, out=None): # <<<<<<<<<<<<<< * shape = [X.shape[i] for i in range(X.ndim)] * cdef np.ndarray Y = self.allocate(tuple(shape), dtype="f") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.mish", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_shape); __Pyx_XDECREF((PyObject *)__pyx_v_Y); __Pyx_XDECREF(__pyx_v_i); __PYX_XDEC_MEMVIEW(&__pyx_v_X, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":633 * return Y * * def backprop_mish(self, const float[:, ::1] dY, const float[:, ::1] X, # <<<<<<<<<<<<<< * threshold=5, out=None): * shape = [X.shape[i] for i in range(X.ndim)] */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_31backprop_mish(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_31backprop_mish = {"backprop_mish", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_31backprop_mish, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_31backprop_mish(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_dY = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_v_threshold = 0; PyObject *__pyx_v_out = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_mish (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_dY,&__pyx_n_s_X,&__pyx_n_s_threshold,&__pyx_n_s_out,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)((PyObject *)__pyx_int_5)); /* "thinc/neural/ops.pyx":634 * * def backprop_mish(self, const float[:, ::1] dY, const float[:, ::1] X, * threshold=5, out=None): # <<<<<<<<<<<<<< * shape = [X.shape[i] for i in range(X.ndim)] * cdef np.ndarray dX = self.allocate(tuple(shape), dtype="f") */ values[4] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = 
PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_mish", 0, 3, 5, 1); __PYX_ERR(0, 633, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_mish", 0, 3, 5, 2); __PYX_ERR(0, 633, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_mish") < 0)) __PYX_ERR(0, 633, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_dY = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_dY.memview)) __PYX_ERR(0, 633, __pyx_L3_error) __pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[2], 0); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 633, __pyx_L3_error) __pyx_v_threshold = values[3]; __pyx_v_out = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_mish", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 633, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_mish", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_30backprop_mish(__pyx_self, __pyx_v_self, __pyx_v_dY, __pyx_v_X, __pyx_v_threshold, __pyx_v_out); /* "thinc/neural/ops.pyx":633 * return Y * * def backprop_mish(self, const float[:, ::1] dY, const float[:, ::1] X, # <<<<<<<<<<<<<< * threshold=5, out=None): * shape = [X.shape[i] for i in range(X.ndim)] */ /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_30backprop_mish(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_dY, __Pyx_memviewslice __pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out) { PyObject *__pyx_v_shape = NULL; PyArrayObject *__pyx_v_dX = 0; PyObject *__pyx_v_i = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations 
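/* Note (not part of the generated output): mish() above either copies its result into a
 * caller-provided array (`out[:] = Y; return out`) or returns the freshly allocated Y.
 * The backprop_mish() body that follows mirrors that structure for the gradient dX, per
 * the embedded ops.pyx source. */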
PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t __pyx_t_4; PyObject *(*__pyx_t_5)(PyObject *); Py_ssize_t __pyx_t_6; PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; float __pyx_t_13; int __pyx_t_14; int __pyx_t_15; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__74) __Pyx_RefNannySetupContext("backprop_mish", 0); __Pyx_TraceCall("backprop_mish", __pyx_f[0], 633, 0, __PYX_ERR(0, 633, __pyx_L1_error)); /* "thinc/neural/ops.pyx":635 * def backprop_mish(self, const float[:, ::1] dY, const float[:, ::1] X, * threshold=5, out=None): * shape = [X.shape[i] for i in range(X.ndim)] # <<<<<<<<<<<<<< * cdef np.ndarray dX = self.allocate(tuple(shape), dtype="f") * cpu_backprop_mish(dX.data, */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 635, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_X, 2, (PyObject *(*)(char *)) __pyx_memview_get_float__const__, (int (*)(char *, PyObject *)) NULL, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 635, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 635, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 635, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (likely(PyList_CheckExact(__pyx_t_2)) || PyTuple_CheckExact(__pyx_t_2)) { __pyx_t_3 = __pyx_t_2; __Pyx_INCREF(__pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = NULL; } else { __pyx_t_4 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 635, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 635, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; for (;;) { if (likely(!__pyx_t_5)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 635, __pyx_L1_error) #else __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 635, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } else { if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(0, 635, __pyx_L1_error) #else __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 635, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } } else { __pyx_t_2 = __pyx_t_5(__pyx_t_3); if (unlikely(!__pyx_t_2)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 635, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_2); } __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 635, __pyx_L1_error) __pyx_t_2 = 
PyInt_FromSsize_t((__pyx_v_X.shape[__pyx_t_6])); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 635, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_2))) __PYX_ERR(0, 635, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_shape = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":636 * threshold=5, out=None): * shape = [X.shape[i] for i in range(X.ndim)] * cdef np.ndarray dX = self.allocate(tuple(shape), dtype="f") # <<<<<<<<<<<<<< * cpu_backprop_mish(dX.data, * &dY[0, 0], &X[0, 0], threshold, X.size) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 636, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyList_AsTuple(__pyx_v_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 636, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 636, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 636, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_n_s_f) < 0) __PYX_ERR(0, 636, __pyx_L1_error) __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 636, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_7) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_7, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 636, __pyx_L1_error) __pyx_v_dX = ((PyArrayObject *)__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":638 * cdef np.ndarray dX = self.allocate(tuple(shape), dtype="f") * cpu_backprop_mish(dX.data, * &dY[0, 0], &X[0, 0], threshold, X.size) # <<<<<<<<<<<<<< * if out is not None: * out[:] = dX */ __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = -1; if (__pyx_t_8 < 0) { __pyx_t_8 += __pyx_v_dY.shape[0]; if (unlikely(__pyx_t_8 < 0)) __pyx_t_10 = 0; } else if (unlikely(__pyx_t_8 >= __pyx_v_dY.shape[0])) __pyx_t_10 = 0; if (__pyx_t_9 < 0) { __pyx_t_9 += __pyx_v_dY.shape[1]; if (unlikely(__pyx_t_9 < 0)) __pyx_t_10 = 1; } else if (unlikely(__pyx_t_9 >= __pyx_v_dY.shape[1])) __pyx_t_10 = 1; if (unlikely(__pyx_t_10 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_10); __PYX_ERR(0, 638, __pyx_L1_error) } __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_10 = -1; if (__pyx_t_11 < 0) { __pyx_t_11 += __pyx_v_X.shape[0]; if (unlikely(__pyx_t_11 < 0)) __pyx_t_10 = 0; } else if (unlikely(__pyx_t_11 >= __pyx_v_X.shape[0])) __pyx_t_10 = 0; if (__pyx_t_12 < 0) { __pyx_t_12 += __pyx_v_X.shape[1]; if (unlikely(__pyx_t_12 < 0)) __pyx_t_10 = 1; } else if (unlikely(__pyx_t_12 >= __pyx_v_X.shape[1])) __pyx_t_10 = 1; if (unlikely(__pyx_t_10 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_10); __PYX_ERR(0, 638, __pyx_L1_error) } __pyx_t_13 = __pyx_PyFloat_AsFloat(__pyx_v_threshold); if (unlikely((__pyx_t_13 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 638, __pyx_L1_error) __pyx_t_7 = __pyx_memoryview_fromslice(__pyx_v_X, 2, (PyObject *(*)(char *)) __pyx_memview_get_float__const__, (int (*)(char *, PyObject *)) NULL, 0);; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 638, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 638, __pyx_L1_error) 
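/* NOTE: cpu_backprop_mish, called just below, is a C kernel defined elsewhere in
 * ops.pyx; it is assumed to apply the elementwise derivative of
 * mish(x) = x * tanh(softplus(x)), passing the gradient through unchanged once x
 * reaches `threshold`. A Python-style sketch of the per-element arithmetic this is
 * assumed to implement (illustration only; the real kernel works on raw float
 * pointers):
 *
 *     sp  = log1p(exp(x))                      # softplus(x)
 *     tsp = tanh(sp)
 *     sig = 1.0 / (1.0 + exp(-x))              # sigmoid(x)
 *     d   = tsp + x * sig * (1.0 - tsp * tsp)  # d mish(x) / dx
 *     dX[i] = dY[i] * (1.0 if x >= threshold else d)
 */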
__Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 638, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":637 * shape = [X.shape[i] for i in range(X.ndim)] * cdef np.ndarray dX = self.allocate(tuple(shape), dtype="f") * cpu_backprop_mish(dX.data, # <<<<<<<<<<<<<< * &dY[0, 0], &X[0, 0], threshold, X.size) * if out is not None: */ __pyx_f_5thinc_6neural_3ops_cpu_backprop_mish(((float *)__pyx_v_dX->data), (&(*((float const *) ( /* dim=1 */ ((char *) (((float const *) ( /* dim=0 */ (__pyx_v_dY.data + __pyx_t_8 * __pyx_v_dY.strides[0]) )) + __pyx_t_9)) )))), (&(*((float const *) ( /* dim=1 */ ((char *) (((float const *) ( /* dim=0 */ (__pyx_v_X.data + __pyx_t_11 * __pyx_v_X.strides[0]) )) + __pyx_t_12)) )))), __pyx_t_13, __pyx_t_10); /* "thinc/neural/ops.pyx":639 * cpu_backprop_mish(dX.data, * &dY[0, 0], &X[0, 0], threshold, X.size) * if out is not None: # <<<<<<<<<<<<<< * out[:] = dX * return out */ __pyx_t_14 = (__pyx_v_out != Py_None); __pyx_t_15 = (__pyx_t_14 != 0); if (__pyx_t_15) { /* "thinc/neural/ops.pyx":640 * &dY[0, 0], &X[0, 0], threshold, X.size) * if out is not None: * out[:] = dX # <<<<<<<<<<<<<< * return out * else: */ if (__Pyx_PyObject_SetSlice(__pyx_v_out, ((PyObject *)__pyx_v_dX), 0, 0, NULL, NULL, &__pyx_slice__3, 0, 0, 1) < 0) __PYX_ERR(0, 640, __pyx_L1_error) /* "thinc/neural/ops.pyx":641 * if out is not None: * out[:] = dX * return out # <<<<<<<<<<<<<< * else: * return dX */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_out); __pyx_r = __pyx_v_out; goto __pyx_L0; /* "thinc/neural/ops.pyx":639 * cpu_backprop_mish(dX.data, * &dY[0, 0], &X[0, 0], threshold, X.size) * if out is not None: # <<<<<<<<<<<<<< * out[:] = dX * return out */ } /* "thinc/neural/ops.pyx":643 * return out * else: * return dX # <<<<<<<<<<<<<< * * #def lstm(self, float[:, ::1] output, float[:, ::1] cells, */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_dX)); __pyx_r = ((PyObject *)__pyx_v_dX); goto __pyx_L0; } /* "thinc/neural/ops.pyx":633 * return Y * * def backprop_mish(self, const float[:, ::1] dY, const float[:, ::1] X, # <<<<<<<<<<<<<< * threshold=5, out=None): * shape = [X.shape[i] for i in range(X.ndim)] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_mish", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_shape); __Pyx_XDECREF((PyObject *)__pyx_v_dX); __Pyx_XDECREF(__pyx_v_i); __PYX_XDEC_MEMVIEW(&__pyx_v_dY, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_X, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":658 * # cells.shape[0], cells.shape[1]) * * def seq2col(self, const float[:, ::1] seq, int nW): # <<<<<<<<<<<<<< * '''Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence. 
* The new sequence is constructed by concatenating nW preceding and succeeding */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_33seq2col(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_5thinc_6neural_3ops_8NumpyOps_32seq2col[] = "Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence.\n The new sequence is constructed by concatenating nW preceding and succeeding\n vectors onto each column in the sequence, to extract a window of features.\n "; static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_33seq2col = {"seq2col", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_33seq2col, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5thinc_6neural_3ops_8NumpyOps_32seq2col}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_33seq2col(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_seq = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_nW; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("seq2col (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_seq,&__pyx_n_s_nW,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seq)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("seq2col", 1, 3, 3, 1); __PYX_ERR(0, 658, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nW)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("seq2col", 1, 3, 3, 2); __PYX_ERR(0, 658, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "seq2col") < 0)) __PYX_ERR(0, 658, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_seq = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_seq.memview)) __PYX_ERR(0, 658, __pyx_L3_error) __pyx_v_nW = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_nW == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 658, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("seq2col", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 658, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_32seq2col(__pyx_self, __pyx_v_self, 
__pyx_v_seq, __pyx_v_nW); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_32seq2col(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_seq, int __pyx_v_nW) { int __pyx_v_B; int __pyx_v_I; PyArrayObject *__pyx_v_cols = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__75) __Pyx_RefNannySetupContext("seq2col", 0); __Pyx_TraceCall("seq2col", __pyx_f[0], 658, 0, __PYX_ERR(0, 658, __pyx_L1_error)); /* "thinc/neural/ops.pyx":663 * vectors onto each column in the sequence, to extract a window of features. * ''' * cdef int B = seq.shape[0] # <<<<<<<<<<<<<< * cdef int I = seq.shape[1] * cdef ndarray cols = self.allocate((B, (2 * nW+1) * I), dtype='float32') */ __pyx_v_B = (__pyx_v_seq.shape[0]); /* "thinc/neural/ops.pyx":664 * ''' * cdef int B = seq.shape[0] * cdef int I = seq.shape[1] # <<<<<<<<<<<<<< * cdef ndarray cols = self.allocate((B, (2 * nW+1) * I), dtype='float32') * seq2col(cols.data, &seq[0,0], nW, B, I) */ __pyx_v_I = (__pyx_v_seq.shape[1]); /* "thinc/neural/ops.pyx":665 * cdef int B = seq.shape[0] * cdef int I = seq.shape[1] * cdef ndarray cols = self.allocate((B, (2 * nW+1) * I), dtype='float32') # <<<<<<<<<<<<<< * seq2col(cols.data, &seq[0,0], nW, B, I) * return cols */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_long((((2 * __pyx_v_nW) + 1) * __pyx_v_I)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_n_s_float32) < 0) __PYX_ERR(0, 665, __pyx_L1_error) __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 665, __pyx_L1_error) __pyx_v_cols = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":666 * cdef int I = seq.shape[1] * cdef ndarray cols = self.allocate((B, (2 * nW+1) * I), dtype='float32') * seq2col(cols.data, &seq[0,0], nW, B, I) # <<<<<<<<<<<<<< * return cols * */ __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_7 = -1; if (__pyx_t_5 < 0) { __pyx_t_5 += 
__pyx_v_seq.shape[0]; if (unlikely(__pyx_t_5 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_5 >= __pyx_v_seq.shape[0])) __pyx_t_7 = 0; if (__pyx_t_6 < 0) { __pyx_t_6 += __pyx_v_seq.shape[1]; if (unlikely(__pyx_t_6 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_6 >= __pyx_v_seq.shape[1])) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); __PYX_ERR(0, 666, __pyx_L1_error) } __pyx_f_5thinc_6neural_3ops_seq2col(((float *)__pyx_v_cols->data), (&(*((float const *) ( /* dim=1 */ ((char *) (((float const *) ( /* dim=0 */ (__pyx_v_seq.data + __pyx_t_5 * __pyx_v_seq.strides[0]) )) + __pyx_t_6)) )))), __pyx_v_nW, __pyx_v_B, __pyx_v_I); /* "thinc/neural/ops.pyx":667 * cdef ndarray cols = self.allocate((B, (2 * nW+1) * I), dtype='float32') * seq2col(cols.data, &seq[0,0], nW, B, I) * return cols # <<<<<<<<<<<<<< * * def backprop_seq2col(self, const float[:, ::1] dY, int nW): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_cols)); __pyx_r = ((PyObject *)__pyx_v_cols); goto __pyx_L0; /* "thinc/neural/ops.pyx":658 * # cells.shape[0], cells.shape[1]) * * def seq2col(self, const float[:, ::1] seq, int nW): # <<<<<<<<<<<<<< * '''Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence. * The new sequence is constructed by concatenating nW preceding and succeeding */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cols); __PYX_XDEC_MEMVIEW(&__pyx_v_seq, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":669 * return cols * * def backprop_seq2col(self, const float[:, ::1] dY, int nW): # <<<<<<<<<<<<<< * cdef int B = dY.shape[0] * cdef int nF = nW*2+1 */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_35backprop_seq2col(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_35backprop_seq2col = {"backprop_seq2col", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_35backprop_seq2col, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_35backprop_seq2col(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_dY = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_nW; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_seq2col (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_dY,&__pyx_n_s_nW,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; 
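/* NOTE: seq2col above implements the windowing described in its docstring: for a
 * (B, I) input and window size nW it returns a (B, (2*nW+1)*I) array whose row b
 * concatenates rows b-nW .. b+nW of the input, with out-of-range rows assumed to
 * be left zero-filled by the allocation. A Python-style sketch of the layout the
 * seq2col C kernel is assumed to produce (illustration only):
 *
 *     nF = 2 * nW + 1
 *     cols = numpy.zeros((B, nF * I), dtype='float32')
 *     for b in range(B):
 *         for f in range(-nW, nW + 1):
 *             if 0 <= b + f < B:
 *                 cols[b, (f + nW) * I:(f + nW + 1) * I] = seq[b + f]
 */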
CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_seq2col", 1, 3, 3, 1); __PYX_ERR(0, 669, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nW)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_seq2col", 1, 3, 3, 2); __PYX_ERR(0, 669, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_seq2col") < 0)) __PYX_ERR(0, 669, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_dY = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_dY.memview)) __PYX_ERR(0, 669, __pyx_L3_error) __pyx_v_nW = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_nW == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 669, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_seq2col", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 669, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_34backprop_seq2col(__pyx_self, __pyx_v_self, __pyx_v_dY, __pyx_v_nW); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_34backprop_seq2col(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_dY, int __pyx_v_nW) { int __pyx_v_B; int __pyx_v_nF; int __pyx_v_I; PyArrayObject *__pyx_v_dX = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__76) __Pyx_RefNannySetupContext("backprop_seq2col", 0); __Pyx_TraceCall("backprop_seq2col", __pyx_f[0], 669, 0, __PYX_ERR(0, 669, __pyx_L1_error)); /* "thinc/neural/ops.pyx":670 * * def backprop_seq2col(self, const float[:, ::1] dY, int nW): * cdef int B = dY.shape[0] # <<<<<<<<<<<<<< * cdef int nF = nW*2+1 * cdef int I = dY.shape[1] / nF */ __pyx_v_B = (__pyx_v_dY.shape[0]); /* "thinc/neural/ops.pyx":671 * def backprop_seq2col(self, const float[:, ::1] dY, int nW): * cdef int B = dY.shape[0] * cdef int nF = nW*2+1 # <<<<<<<<<<<<<< * cdef int I = dY.shape[1] / nF * cdef ndarray dX = self.allocate((B, I), dtype='float32') */ __pyx_v_nF = ((__pyx_v_nW * 2) + 1); /* "thinc/neural/ops.pyx":672 * cdef int B = dY.shape[0] * cdef int nF = nW*2+1 * cdef int I = dY.shape[1] / nF # <<<<<<<<<<<<<< * cdef ndarray dX = self.allocate((B, I), dtype='float32') * backprop_seq2col(dX.data, &dY[0,0], B, I, nW) */ __pyx_v_I = ((__pyx_v_dY.shape[1]) / __pyx_v_nF); /* "thinc/neural/ops.pyx":673 * cdef int nF = nW*2+1 * cdef int I = dY.shape[1] / nF * cdef ndarray dX = self.allocate((B, I), dtype='float32') # <<<<<<<<<<<<<< * backprop_seq2col(dX.data, &dY[0,0], B, I, nW) * 
return dX */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_I); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_n_s_float32) < 0) __PYX_ERR(0, 673, __pyx_L1_error) __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 673, __pyx_L1_error) __pyx_v_dX = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":674 * cdef int I = dY.shape[1] / nF * cdef ndarray dX = self.allocate((B, I), dtype='float32') * backprop_seq2col(dX.data, &dY[0,0], B, I, nW) # <<<<<<<<<<<<<< * return dX * */ __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_7 = -1; if (__pyx_t_5 < 0) { __pyx_t_5 += __pyx_v_dY.shape[0]; if (unlikely(__pyx_t_5 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_5 >= __pyx_v_dY.shape[0])) __pyx_t_7 = 0; if (__pyx_t_6 < 0) { __pyx_t_6 += __pyx_v_dY.shape[1]; if (unlikely(__pyx_t_6 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_6 >= __pyx_v_dY.shape[1])) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); __PYX_ERR(0, 674, __pyx_L1_error) } __pyx_f_5thinc_6neural_3ops_backprop_seq2col(((float *)__pyx_v_dX->data), (&(*((float const *) ( /* dim=1 */ ((char *) (((float const *) ( /* dim=0 */ (__pyx_v_dY.data + __pyx_t_5 * __pyx_v_dY.strides[0]) )) + __pyx_t_6)) )))), __pyx_v_B, __pyx_v_I, __pyx_v_nW); /* "thinc/neural/ops.pyx":675 * cdef ndarray dX = self.allocate((B, I), dtype='float32') * backprop_seq2col(dX.data, &dY[0,0], B, I, nW) * return dX # <<<<<<<<<<<<<< * * def remap_ids(self, PreshMap mapping, uint64_t[::1] ids_mv, uint64_t value=0): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_dX)); __pyx_r = ((PyObject *)__pyx_v_dX); goto __pyx_L0; /* "thinc/neural/ops.pyx":669 * return cols * * def backprop_seq2col(self, const float[:, ::1] dY, int nW): # <<<<<<<<<<<<<< * cdef int B = dY.shape[0] * cdef int nF = nW*2+1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_dX); __PYX_XDEC_MEMVIEW(&__pyx_v_dY, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); 
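/* NOTE: backprop_seq2col above is the adjoint of seq2col: each (2*nW+1)-slot window
 * of dY is summed back into the row it was copied from, giving a (B, I) gradient
 * with I = dY.shape[1] / (2*nW+1) (integer division in the generated C). A
 * Python-style sketch of the scatter-add the backprop_seq2col C kernel is assumed
 * to perform (illustration only):
 *
 *     nF = 2 * nW + 1
 *     dX = numpy.zeros((B, I), dtype='float32')
 *     for b in range(B):
 *         for f in range(-nW, nW + 1):
 *             if 0 <= b + f < B:
 *                 dX[b + f] += dY[b, (f + nW) * I:(f + nW + 1) * I]
 */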
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":677 * return dX * * def remap_ids(self, PreshMap mapping, uint64_t[::1] ids_mv, uint64_t value=0): # <<<<<<<<<<<<<< * cdef uint64_t* ids = &ids_mv[0] * cdef ndarray[uint64_t] output_arr = self.allocate(len(ids_mv), dtype='uint64') */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_37remap_ids(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_37remap_ids = {"remap_ids", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_37remap_ids, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_37remap_ids(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; struct __pyx_obj_7preshed_4maps_PreshMap *__pyx_v_mapping = 0; __Pyx_memviewslice __pyx_v_ids_mv = { 0, 0, { 0 }, { 0 }, { 0 } }; uint64_t __pyx_v_value; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("remap_ids (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_mapping,&__pyx_n_s_ids_mv,&__pyx_n_s_value,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mapping)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("remap_ids", 0, 3, 4, 1); __PYX_ERR(0, 677, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_ids_mv)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("remap_ids", 0, 3, 4, 2); __PYX_ERR(0, 677, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_value); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "remap_ids") < 0)) __PYX_ERR(0, 677, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_mapping = ((struct __pyx_obj_7preshed_4maps_PreshMap *)values[1]); __pyx_v_ids_mv = __Pyx_PyObject_to_MemoryviewSlice_dc_nn_uint64_t(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_ids_mv.memview)) __PYX_ERR(0, 677, __pyx_L3_error) if (values[3]) { __pyx_v_value = __Pyx_PyInt_As_uint64_t(values[3]); if (unlikely((__pyx_v_value == ((uint64_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 677, __pyx_L3_error) } 
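/* NOTE: remap_ids rewrites a vector of uint64 ids through the PreshMap `mapping`
 * (see the loop further below in this function's implementation): id 0 stays 0,
 * ids already present in the map keep their stored value, and unseen ids receive
 * the running `value` counter, which is written back into the map and incremented
 * whenever it is non-zero. A Python-style sketch of that loop (illustration only,
 * using get/set as in the quoted .pyx source):
 *
 *     output = numpy.zeros(len(ids_mv), dtype='uint64')
 *     for i, key in enumerate(ids_mv):
 *         if key == 0:
 *             output[i] = 0
 *         elif mapping.get(key) != 0:
 *             output[i] = mapping.get(key)
 *         else:
 *             output[i] = value
 *             if value != 0:
 *                 mapping.set(key, value)
 *                 value += 1
 */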
else { __pyx_v_value = ((uint64_t)((uint64_t)0)); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("remap_ids", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 677, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.remap_ids", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mapping), __pyx_ptype_7preshed_4maps_PreshMap, 1, "mapping", 0))) __PYX_ERR(0, 677, __pyx_L1_error) __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_36remap_ids(__pyx_self, __pyx_v_self, __pyx_v_mapping, __pyx_v_ids_mv, __pyx_v_value); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_36remap_ids(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, struct __pyx_obj_7preshed_4maps_PreshMap *__pyx_v_mapping, __Pyx_memviewslice __pyx_v_ids_mv, uint64_t __pyx_v_value) { uint64_t *__pyx_v_ids; PyArrayObject *__pyx_v_output_arr = 0; uint64_t *__pyx_v_output; CYTHON_UNUSED uint64_t __pyx_v_key; Py_ssize_t __pyx_v_i; uint64_t __pyx_v_mapped; __Pyx_LocalBuf_ND __pyx_pybuffernd_output_arr; __Pyx_Buffer __pyx_pybuffer_output_arr; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyArrayObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__77) __Pyx_RefNannySetupContext("remap_ids", 0); __Pyx_TraceCall("remap_ids", __pyx_f[0], 677, 0, __PYX_ERR(0, 677, __pyx_L1_error)); __pyx_pybuffer_output_arr.pybuffer.buf = NULL; __pyx_pybuffer_output_arr.refcount = 0; __pyx_pybuffernd_output_arr.data = NULL; __pyx_pybuffernd_output_arr.rcbuffer = &__pyx_pybuffer_output_arr; /* "thinc/neural/ops.pyx":678 * * def remap_ids(self, PreshMap mapping, uint64_t[::1] ids_mv, uint64_t value=0): * cdef uint64_t* ids = &ids_mv[0] # <<<<<<<<<<<<<< * cdef ndarray[uint64_t] output_arr = self.allocate(len(ids_mv), dtype='uint64') * output = output_arr.data */ __pyx_t_1 = 0; __pyx_t_2 = -1; if (__pyx_t_1 < 0) { __pyx_t_1 += __pyx_v_ids_mv.shape[0]; if (unlikely(__pyx_t_1 < 0)) __pyx_t_2 = 0; } else if (unlikely(__pyx_t_1 >= __pyx_v_ids_mv.shape[0])) __pyx_t_2 = 0; if (unlikely(__pyx_t_2 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_2); __PYX_ERR(0, 678, __pyx_L1_error) } __pyx_v_ids = (&(*((uint64_t *) ( /* dim=0 */ ((char *) (((uint64_t *) __pyx_v_ids_mv.data) + __pyx_t_1)) )))); /* "thinc/neural/ops.pyx":679 * def remap_ids(self, PreshMap mapping, uint64_t[::1] ids_mv, uint64_t value=0): * cdef uint64_t* ids = &ids_mv[0] * cdef ndarray[uint64_t] output_arr = self.allocate(len(ids_mv), dtype='uint64') # <<<<<<<<<<<<<< * output = output_arr.data * cdef uint64_t key = 0 */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_MemoryView_Len(__pyx_v_ids_mv); __pyx_t_5 = __Pyx_PyInt_FromSize_t(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyTuple_New(1); if 
(unlikely(!__pyx_t_6)) __PYX_ERR(0, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_n_s_uint64) < 0) __PYX_ERR(0, 679, __pyx_L1_error) __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_7) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_7, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 679, __pyx_L1_error) __pyx_t_8 = ((PyArrayObject *)__pyx_t_7); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_output_arr.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn_uint64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_output_arr = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_output_arr.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 679, __pyx_L1_error) } else {__pyx_pybuffernd_output_arr.diminfo[0].strides = __pyx_pybuffernd_output_arr.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_output_arr.diminfo[0].shape = __pyx_pybuffernd_output_arr.rcbuffer->pybuffer.shape[0]; } } __pyx_t_8 = 0; __pyx_v_output_arr = ((PyArrayObject *)__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":680 * cdef uint64_t* ids = &ids_mv[0] * cdef ndarray[uint64_t] output_arr = self.allocate(len(ids_mv), dtype='uint64') * output = output_arr.data # <<<<<<<<<<<<<< * cdef uint64_t key = 0 * for i in range(ids_mv.shape[0]): */ __pyx_v_output = ((uint64_t *)__pyx_v_output_arr->data); /* "thinc/neural/ops.pyx":681 * cdef ndarray[uint64_t] output_arr = self.allocate(len(ids_mv), dtype='uint64') * output = output_arr.data * cdef uint64_t key = 0 # <<<<<<<<<<<<<< * for i in range(ids_mv.shape[0]): * if ids[i] == 0: */ __pyx_v_key = 0; /* "thinc/neural/ops.pyx":682 * output = output_arr.data * cdef uint64_t key = 0 * for i in range(ids_mv.shape[0]): # <<<<<<<<<<<<<< * if ids[i] == 0: * output[i] = 0 */ __pyx_t_9 = (__pyx_v_ids_mv.shape[0]); __pyx_t_10 = __pyx_t_9; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "thinc/neural/ops.pyx":683 * cdef uint64_t key = 0 * for i in range(ids_mv.shape[0]): * if ids[i] == 0: # <<<<<<<<<<<<<< * output[i] = 0 * else: */ __pyx_t_12 = (((__pyx_v_ids[__pyx_v_i]) == 0) != 0); if (__pyx_t_12) { /* "thinc/neural/ops.pyx":684 * for i in range(ids_mv.shape[0]): * if ids[i] == 0: * output[i] = 0 # <<<<<<<<<<<<<< * else: * mapped = mapping.get(ids[i]) */ (__pyx_v_output[__pyx_v_i]) = 0; /* "thinc/neural/ops.pyx":683 * cdef uint64_t key = 0 * for i in range(ids_mv.shape[0]): * if ids[i] == 0: # <<<<<<<<<<<<<< * output[i] = 0 * else: */ goto __pyx_L5; } /* "thinc/neural/ops.pyx":686 * output[i] = 0 * else: * mapped = mapping.get(ids[i]) # <<<<<<<<<<<<<< * if mapped != 0: * output[i] = mapped */ /*else*/ { __pyx_v_mapped = ((uint64_t)((struct __pyx_vtabstruct_7preshed_4maps_PreshMap *)__pyx_v_mapping->__pyx_vtab)->get(__pyx_v_mapping, (__pyx_v_ids[__pyx_v_i]))); /* "thinc/neural/ops.pyx":687 * else: * mapped = mapping.get(ids[i]) * if mapped != 0: # <<<<<<<<<<<<<< * output[i] = mapped * else: */ __pyx_t_12 = ((__pyx_v_mapped != 0) != 0); if (__pyx_t_12) { /* 
"thinc/neural/ops.pyx":688 * mapped = mapping.get(ids[i]) * if mapped != 0: * output[i] = mapped # <<<<<<<<<<<<<< * else: * output[i] = value */ (__pyx_v_output[__pyx_v_i]) = __pyx_v_mapped; /* "thinc/neural/ops.pyx":687 * else: * mapped = mapping.get(ids[i]) * if mapped != 0: # <<<<<<<<<<<<<< * output[i] = mapped * else: */ goto __pyx_L6; } /* "thinc/neural/ops.pyx":690 * output[i] = mapped * else: * output[i] = value # <<<<<<<<<<<<<< * if value != 0: * mapping.set(ids[i], value) */ /*else*/ { (__pyx_v_output[__pyx_v_i]) = __pyx_v_value; /* "thinc/neural/ops.pyx":691 * else: * output[i] = value * if value != 0: # <<<<<<<<<<<<<< * mapping.set(ids[i], value) * value += 1 */ __pyx_t_12 = ((__pyx_v_value != 0) != 0); if (__pyx_t_12) { /* "thinc/neural/ops.pyx":692 * output[i] = value * if value != 0: * mapping.set(ids[i], value) # <<<<<<<<<<<<<< * value += 1 * return output_arr */ ((struct __pyx_vtabstruct_7preshed_4maps_PreshMap *)__pyx_v_mapping->__pyx_vtab)->set(__pyx_v_mapping, (__pyx_v_ids[__pyx_v_i]), ((void *)__pyx_v_value)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 692, __pyx_L1_error) /* "thinc/neural/ops.pyx":693 * if value != 0: * mapping.set(ids[i], value) * value += 1 # <<<<<<<<<<<<<< * return output_arr * */ __pyx_v_value = (__pyx_v_value + 1); /* "thinc/neural/ops.pyx":691 * else: * output[i] = value * if value != 0: # <<<<<<<<<<<<<< * mapping.set(ids[i], value) * value += 1 */ } } __pyx_L6:; } __pyx_L5:; } /* "thinc/neural/ops.pyx":694 * mapping.set(ids[i], value) * value += 1 * return output_arr # <<<<<<<<<<<<<< * * def increment_slices(self, ndarray contig_array, ndarray _to_add, _starts): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_output_arr)); __pyx_r = ((PyObject *)__pyx_v_output_arr); goto __pyx_L0; /* "thinc/neural/ops.pyx":677 * return dX * * def remap_ids(self, PreshMap mapping, uint64_t[::1] ids_mv, uint64_t value=0): # <<<<<<<<<<<<<< * cdef uint64_t* ids = &ids_mv[0] * cdef ndarray[uint64_t] output_arr = self.allocate(len(ids_mv), dtype='uint64') */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_output_arr.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.remap_ids", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_output_arr.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_output_arr); __PYX_XDEC_MEMVIEW(&__pyx_v_ids_mv, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":696 * return output_arr * * def increment_slices(self, ndarray contig_array, ndarray _to_add, _starts): # <<<<<<<<<<<<<< * cdef ndarray contig_to_add = self.xp.ascontiguousarray(_to_add, dtype='float32') * cdef ndarray contig_starts = self.xp.ascontiguousarray(_starts, dtype='int32') */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_39increment_slices(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_39increment_slices = {"increment_slices", 
(PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_39increment_slices, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_39increment_slices(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyArrayObject *__pyx_v_contig_array = 0; PyArrayObject *__pyx_v__to_add = 0; PyObject *__pyx_v__starts = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("increment_slices (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_contig_array,&__pyx_n_s_to_add,&__pyx_n_s_starts,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_contig_array)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("increment_slices", 1, 4, 4, 1); __PYX_ERR(0, 696, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_to_add)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("increment_slices", 1, 4, 4, 2); __PYX_ERR(0, 696, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_starts)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("increment_slices", 1, 4, 4, 3); __PYX_ERR(0, 696, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "increment_slices") < 0)) __PYX_ERR(0, 696, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_self = values[0]; __pyx_v_contig_array = ((PyArrayObject *)values[1]); __pyx_v__to_add = ((PyArrayObject *)values[2]); __pyx_v__starts = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("increment_slices", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 696, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.increment_slices", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_contig_array), __pyx_ptype_5numpy_ndarray, 1, "contig_array", 0))) __PYX_ERR(0, 696, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v__to_add), __pyx_ptype_5numpy_ndarray, 1, "_to_add", 0))) __PYX_ERR(0, 696, __pyx_L1_error) __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_38increment_slices(__pyx_self, __pyx_v_self, __pyx_v_contig_array, __pyx_v__to_add, 
__pyx_v__starts); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_38increment_slices(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyArrayObject *__pyx_v_contig_array, PyArrayObject *__pyx_v__to_add, PyObject *__pyx_v__starts) { PyArrayObject *__pyx_v_contig_to_add = 0; PyArrayObject *__pyx_v_contig_starts = 0; float const *__pyx_v_to_add; float *__pyx_v_whole_array; int const *__pyx_v_starts; int __pyx_v_n_slice; int __pyx_v_length; int __pyx_v_stride; int __pyx_v_start; float *__pyx_v_workon; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; int __pyx_t_6; int const *__pyx_t_7; int const *__pyx_t_8; int const *__pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__78) __Pyx_RefNannySetupContext("increment_slices", 0); __Pyx_TraceCall("increment_slices", __pyx_f[0], 696, 0, __PYX_ERR(0, 696, __pyx_L1_error)); /* "thinc/neural/ops.pyx":697 * * def increment_slices(self, ndarray contig_array, ndarray _to_add, _starts): * cdef ndarray contig_to_add = self.xp.ascontiguousarray(_to_add, dtype='float32') # <<<<<<<<<<<<<< * cdef ndarray contig_starts = self.xp.ascontiguousarray(_starts, dtype='int32') * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ascontiguousarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v__to_add)); __Pyx_GIVEREF(((PyObject *)__pyx_v__to_add)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v__to_add)); __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_n_s_float32) < 0) __PYX_ERR(0, 697, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 697, __pyx_L1_error) __pyx_v_contig_to_add = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":698 * def increment_slices(self, ndarray contig_array, ndarray _to_add, _starts): * cdef ndarray contig_to_add = self.xp.ascontiguousarray(_to_add, dtype='float32') * cdef ndarray contig_starts = self.xp.ascontiguousarray(_starts, dtype='int32') # <<<<<<<<<<<<<< * * cdef const float* to_add = contig_to_add.data */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_ascontiguousarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 698, __pyx_L1_error) 
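/* NOTE: increment_slices first forces _to_add into a contiguous float32 array and
 * _starts into a contiguous int32 array (the two ascontiguousarray calls here), so
 * that the raw-pointer loop further below can walk both buffers directly. A minimal
 * usage sketch, assuming `ops` is a NumpyOps instance and contig_array is a
 * contiguous float32 ndarray (illustration only):
 *
 *     ops.increment_slices(contig_array, to_add, starts)
 */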
__Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_v__starts); __Pyx_GIVEREF(__pyx_v__starts); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v__starts); __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_n_s_int32) < 0) __PYX_ERR(0, 698, __pyx_L1_error) __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 698, __pyx_L1_error) __pyx_v_contig_starts = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":700 * cdef ndarray contig_starts = self.xp.ascontiguousarray(_starts, dtype='int32') * * cdef const float* to_add = contig_to_add.data # <<<<<<<<<<<<<< * cdef float* whole_array = contig_array.data * cdef const int* starts = contig_starts.data */ __pyx_v_to_add = ((__pyx_t_5thinc_8typedefs_weight_t const *)__pyx_v_contig_to_add->data); /* "thinc/neural/ops.pyx":701 * * cdef const float* to_add = contig_to_add.data * cdef float* whole_array = contig_array.data # <<<<<<<<<<<<<< * cdef const int* starts = contig_starts.data * cdef int n_slice = len(_starts) */ __pyx_v_whole_array = ((__pyx_t_5thinc_8typedefs_weight_t *)__pyx_v_contig_array->data); /* "thinc/neural/ops.pyx":702 * cdef const float* to_add = contig_to_add.data * cdef float* whole_array = contig_array.data * cdef const int* starts = contig_starts.data # <<<<<<<<<<<<<< * cdef int n_slice = len(_starts) * cdef int length = _to_add.size */ __pyx_v_starts = ((int const *)__pyx_v_contig_starts->data); /* "thinc/neural/ops.pyx":703 * cdef float* whole_array = contig_array.data * cdef const int* starts = contig_starts.data * cdef int n_slice = len(_starts) # <<<<<<<<<<<<<< * cdef int length = _to_add.size * cdef int stride = length / _to_add.shape[0] */ __pyx_t_5 = PyObject_Length(__pyx_v__starts); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 703, __pyx_L1_error) __pyx_v_n_slice = __pyx_t_5; /* "thinc/neural/ops.pyx":704 * cdef const int* starts = contig_starts.data * cdef int n_slice = len(_starts) * cdef int length = _to_add.size # <<<<<<<<<<<<<< * cdef int stride = length / _to_add.shape[0] * for start in starts[:n_slice]: */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v__to_add), __pyx_n_s_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 704, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 704, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_length = __pyx_t_6; /* "thinc/neural/ops.pyx":705 * cdef int n_slice = len(_starts) * cdef int length = _to_add.size * cdef int stride = length / _to_add.shape[0] # <<<<<<<<<<<<<< * for start in starts[:n_slice]: * workon = &whole_array[start * stride] */ __pyx_v_stride = (__pyx_v_length / (__pyx_v__to_add->dimensions[0])); /* "thinc/neural/ops.pyx":706 * cdef int length = _to_add.size * cdef int stride = length / _to_add.shape[0] * for start in starts[:n_slice]: # <<<<<<<<<<<<<< * workon = &whole_array[start * stride] * for i in range(length): */ 
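/* NOTE: the loop below adds the entire flattened _to_add buffer (length ==
 * _to_add.size) into contig_array at offset start * stride for every start index,
 * with stride = _to_add.size / _to_add.shape[0] (integer division). A Python-style
 * sketch of the same update (illustration only):
 *
 *     flat = contig_array.ravel()
 *     add  = contig_to_add.ravel()
 *     for start in starts[:n_slice]:
 *         flat[start * stride : start * stride + add.size] += add
 */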
__pyx_t_8 = (__pyx_v_starts + __pyx_v_n_slice); for (__pyx_t_9 = __pyx_v_starts; __pyx_t_9 < __pyx_t_8; __pyx_t_9++) { __pyx_t_7 = __pyx_t_9; __pyx_v_start = (__pyx_t_7[0]); /* "thinc/neural/ops.pyx":707 * cdef int stride = length / _to_add.shape[0] * for start in starts[:n_slice]: * workon = &whole_array[start * stride] # <<<<<<<<<<<<<< * for i in range(length): * workon[i] += to_add[i] */ __pyx_v_workon = (&(__pyx_v_whole_array[(__pyx_v_start * __pyx_v_stride)])); /* "thinc/neural/ops.pyx":708 * for start in starts[:n_slice]: * workon = &whole_array[start * stride] * for i in range(length): # <<<<<<<<<<<<<< * workon[i] += to_add[i] * */ __pyx_t_6 = __pyx_v_length; __pyx_t_10 = __pyx_t_6; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "thinc/neural/ops.pyx":709 * workon = &whole_array[start * stride] * for i in range(length): * workon[i] += to_add[i] # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __pyx_t_12 = __pyx_v_i; (__pyx_v_workon[__pyx_t_12]) = ((__pyx_v_workon[__pyx_t_12]) + (__pyx_v_to_add[__pyx_v_i])); } } /* "thinc/neural/ops.pyx":696 * return output_arr * * def increment_slices(self, ndarray contig_array, ndarray _to_add, _starts): # <<<<<<<<<<<<<< * cdef ndarray contig_to_add = self.xp.ascontiguousarray(_to_add, dtype='float32') * cdef ndarray contig_starts = self.xp.ascontiguousarray(_starts, dtype='int32') */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.increment_slices", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_contig_to_add); __Pyx_XDECREF((PyObject *)__pyx_v_contig_starts); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":713 * @cython.boundscheck(False) * @cython.wraparound(False) * def hash(self, const uint64_t[::1] ids, uint32_t seed): # <<<<<<<<<<<<<< * '''Hash a sequence of 64-bit keys into a table with 4 32-bit keys''' * # Written to mirror the GPU implementation */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_41hash(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_5thinc_6neural_3ops_8NumpyOps_40hash[] = "Hash a sequence of 64-bit keys into a table with 4 32-bit keys"; static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_41hash = {"hash", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_41hash, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5thinc_6neural_3ops_8NumpyOps_40hash}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_41hash(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_ids = { 0, 0, { 0 }, { 0 }, { 0 } }; uint32_t __pyx_v_seed; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("hash (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_ids,&__pyx_n_s_seed,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 
1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_ids)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("hash", 1, 3, 3, 1); __PYX_ERR(0, 713, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seed)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("hash", 1, 3, 3, 2); __PYX_ERR(0, 713, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "hash") < 0)) __PYX_ERR(0, 713, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_ids = __Pyx_PyObject_to_MemoryviewSlice_dc_nn_uint64_t__const__(values[1], 0); if (unlikely(!__pyx_v_ids.memview)) __PYX_ERR(0, 713, __pyx_L3_error) __pyx_v_seed = __Pyx_PyInt_As_uint32_t(values[2]); if (unlikely((__pyx_v_seed == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 713, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("hash", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 713, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.hash", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_40hash(__pyx_self, __pyx_v_self, __pyx_v_ids, __pyx_v_seed); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_40hash(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_ids, uint32_t __pyx_v_seed) { PyArrayObject *__pyx_v_keys = 0; CYTHON_UNUSED int __pyx_v_i; int __pyx_v_j; unsigned char __pyx_v_entropy[16]; size_t __pyx_v_n_items; size_t __pyx_v_in_size; unsigned char *__pyx_v_src; unsigned char *__pyx_v_dest; __Pyx_LocalBuf_ND __pyx_pybuffernd_keys; __Pyx_Buffer __pyx_pybuffer_keys; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyArrayObject *__pyx_t_5 = NULL; size_t __pyx_t_6; Py_ssize_t __pyx_t_7; size_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__79) __Pyx_RefNannySetupContext("hash", 0); __Pyx_TraceCall("hash", __pyx_f[0], 713, 0, __PYX_ERR(0, 713, __pyx_L1_error)); __pyx_pybuffer_keys.pybuffer.buf = NULL; __pyx_pybuffer_keys.refcount = 0; __pyx_pybuffernd_keys.data = NULL; __pyx_pybuffernd_keys.rcbuffer = &__pyx_pybuffer_keys; /* "thinc/neural/ops.pyx":716 * '''Hash a sequence of 64-bit keys into a table with 4 32-bit keys''' * # Written to mirror the GPU implementation * cdef ndarray[uint32_t, ndim=2] keys = self.allocate((ids.shape[0], 4), dtype='uint32') # <<<<<<<<<<<<<< * cdef int i, j * cdef unsigned char entropy[16] # 
128/8=16 */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 716, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t((__pyx_v_ids.shape[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 716, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 716, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_INCREF(__pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 716, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 716, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_n_s_uint32) < 0) __PYX_ERR(0, 716, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 716, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 716, __pyx_L1_error) __pyx_t_5 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keys.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn_uint32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { __pyx_v_keys = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_keys.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 716, __pyx_L1_error) } else {__pyx_pybuffernd_keys.diminfo[0].strides = __pyx_pybuffernd_keys.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keys.diminfo[0].shape = __pyx_pybuffernd_keys.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_keys.diminfo[1].strides = __pyx_pybuffernd_keys.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_keys.diminfo[1].shape = __pyx_pybuffernd_keys.rcbuffer->pybuffer.shape[1]; } } __pyx_t_5 = 0; __pyx_v_keys = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":719 * cdef int i, j * cdef unsigned char entropy[16] # 128/8=16 * cdef size_t n_items = len(ids) # <<<<<<<<<<<<<< * cdef size_t in_size = sizeof(uint64_t) * src = &ids[0] */ __pyx_t_6 = __Pyx_MemoryView_Len(__pyx_v_ids); __pyx_v_n_items = __pyx_t_6; /* "thinc/neural/ops.pyx":720 * cdef unsigned char entropy[16] # 128/8=16 * cdef size_t n_items = len(ids) * cdef size_t in_size = sizeof(uint64_t) # <<<<<<<<<<<<<< * src = &ids[0] * dest = keys.data */ __pyx_v_in_size = (sizeof(uint64_t)); /* "thinc/neural/ops.pyx":721 * cdef size_t n_items = len(ids) * cdef size_t in_size = sizeof(uint64_t) * src = &ids[0] # <<<<<<<<<<<<<< * dest = keys.data * for i in range(n_items): */ __pyx_t_7 = 0; __pyx_v_src = ((unsigned char *)(&(*((uint64_t const *) ( /* dim=0 */ ((char *) (((uint64_t const *) __pyx_v_ids.data) + __pyx_t_7)) ))))); /* "thinc/neural/ops.pyx":722 * cdef size_t in_size = sizeof(uint64_t) * src = &ids[0] * dest = keys.data # <<<<<<<<<<<<<< * for i in range(n_items): * hash128_x64(src, in_size, seed, entropy) */ __pyx_v_dest = ((unsigned char *)__pyx_v_keys->data); /* "thinc/neural/ops.pyx":723 * src = &ids[0] * dest = keys.data * for i in range(n_items): # <<<<<<<<<<<<<< * 
hash128_x64(src, in_size, seed, entropy) * for j in range(16): */ __pyx_t_6 = __pyx_v_n_items; __pyx_t_8 = __pyx_t_6; for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_i = __pyx_t_9; /* "thinc/neural/ops.pyx":724 * dest = keys.data * for i in range(n_items): * hash128_x64(src, in_size, seed, entropy) # <<<<<<<<<<<<<< * for j in range(16): * dest[j] = entropy[j] */ __pyx_f_10murmurhash_4mrmr_hash128_x64(((void *)__pyx_v_src), __pyx_v_in_size, __pyx_v_seed, __pyx_v_entropy); /* "thinc/neural/ops.pyx":725 * for i in range(n_items): * hash128_x64(src, in_size, seed, entropy) * for j in range(16): # <<<<<<<<<<<<<< * dest[j] = entropy[j] * src += in_size */ for (__pyx_t_10 = 0; __pyx_t_10 < 16; __pyx_t_10+=1) { __pyx_v_j = __pyx_t_10; /* "thinc/neural/ops.pyx":726 * hash128_x64(src, in_size, seed, entropy) * for j in range(16): * dest[j] = entropy[j] # <<<<<<<<<<<<<< * src += in_size * dest += 16 */ (__pyx_v_dest[__pyx_v_j]) = (__pyx_v_entropy[__pyx_v_j]); } /* "thinc/neural/ops.pyx":727 * for j in range(16): * dest[j] = entropy[j] * src += in_size # <<<<<<<<<<<<<< * dest += 16 * return keys */ __pyx_v_src = (__pyx_v_src + __pyx_v_in_size); /* "thinc/neural/ops.pyx":728 * dest[j] = entropy[j] * src += in_size * dest += 16 # <<<<<<<<<<<<<< * return keys * */ __pyx_v_dest = (__pyx_v_dest + 16); } /* "thinc/neural/ops.pyx":729 * src += in_size * dest += 16 * return keys # <<<<<<<<<<<<<< * * def mean_pool(self, const float[:, ::1] X, int[::1] lengths): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_keys)); __pyx_r = ((PyObject *)__pyx_v_keys); goto __pyx_L0; /* "thinc/neural/ops.pyx":713 * @cython.boundscheck(False) * @cython.wraparound(False) * def hash(self, const uint64_t[::1] ids, uint32_t seed): # <<<<<<<<<<<<<< * '''Hash a sequence of 64-bit keys into a table with 4 32-bit keys''' * # Written to mirror the GPU implementation */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keys.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.hash", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keys.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_keys); __PYX_XDEC_MEMVIEW(&__pyx_v_ids, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":731 * return keys * * def mean_pool(self, const float[:, ::1] X, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_43mean_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_43mean_pool = {"mean_pool", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_43mean_pool, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_43mean_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice 
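/* Reference sketch for the hash method compiled above: each 64-bit id is run
 * through murmurhash's hash128_x64 and the 16 bytes of output become one row
 * of four uint32 keys, so the result has shape (n_ids, 4).  The stub below
 * stands in for the external 128-bit hash (it is not the real routine); only
 * the buffer walk is meant to match the generated loop.
 *
 *     #include <stddef.h>
 *     #include <stdint.h>
 *     #include <string.h>
 *
 *     // Placeholder mixer standing in for murmurhash.mrmr.hash128_x64.
 *     static void hash128_x64_stub(const void *key, size_t len, uint32_t seed,
 *                                  unsigned char out[16])
 *     {
 *         uint64_t x = seed;
 *         const unsigned char *p = (const unsigned char *)key;
 *         for (size_t i = 0; i < len; i++)
 *             x = (x ^ p[i]) * 0x100000001b3ULL;   // FNV-style, placeholder only
 *         memcpy(out, &x, 8);
 *         memcpy(out + 8, &x, 8);
 *     }
 *
 *     // One 16-byte digest per id, written out as four uint32 keys per row.
 *     static void ref_hash_rows(uint32_t *keys, const uint64_t *ids,
 *                               size_t n_items, uint32_t seed)
 *     {
 *         unsigned char entropy[16];
 *         const unsigned char *src = (const unsigned char *)ids;
 *         unsigned char *dest = (unsigned char *)keys;
 *         for (size_t i = 0; i < n_items; i++) {
 *             hash128_x64_stub(src, sizeof(uint64_t), seed, entropy);
 *             memcpy(dest, entropy, 16);
 *             src += sizeof(uint64_t);
 *             dest += 16;
 *         }
 *     }
 */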
__pyx_v_lengths = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mean_pool (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_lengths,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mean_pool", 1, 3, 3, 1); __PYX_ERR(0, 731, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mean_pool", 1, 3, 3, 2); __PYX_ERR(0, 731, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mean_pool") < 0)) __PYX_ERR(0, 731, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 731, __pyx_L3_error) __pyx_v_lengths = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_lengths.memview)) __PYX_ERR(0, 731, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("mean_pool", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 731, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.mean_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_42mean_pool(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_42mean_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_lengths) { int __pyx_v_B; int __pyx_v_O; int __pyx_v_T; struct __pyx_obj_5cymem_5cymem_Pool *__pyx_v_mem = 0; float *__pyx_v_means; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; void *__pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__80) __Pyx_RefNannySetupContext("mean_pool", 0); __Pyx_TraceCall("mean_pool", __pyx_f[0], 731, 0, __PYX_ERR(0, 731, __pyx_L1_error)); /* 
"thinc/neural/ops.pyx":732 * * def mean_pool(self, const float[:, ::1] X, int[::1] lengths): * cdef int B = lengths.shape[0] # <<<<<<<<<<<<<< * cdef int O = X.shape[1] * cdef int T = X.shape[0] */ __pyx_v_B = (__pyx_v_lengths.shape[0]); /* "thinc/neural/ops.pyx":733 * def mean_pool(self, const float[:, ::1] X, int[::1] lengths): * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] # <<<<<<<<<<<<<< * cdef int T = X.shape[0] * */ __pyx_v_O = (__pyx_v_X.shape[1]); /* "thinc/neural/ops.pyx":734 * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] * cdef int T = X.shape[0] # <<<<<<<<<<<<<< * * cdef Pool mem = Pool() */ __pyx_v_T = (__pyx_v_X.shape[0]); /* "thinc/neural/ops.pyx":736 * cdef int T = X.shape[0] * * cdef Pool mem = Pool() # <<<<<<<<<<<<<< * means = mem.alloc(B * O, sizeof(float)) * */ __pyx_t_1 = __Pyx_PyObject_CallNoArg(((PyObject *)__pyx_ptype_5cymem_5cymem_Pool)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 736, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_mem = ((struct __pyx_obj_5cymem_5cymem_Pool *)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":737 * * cdef Pool mem = Pool() * means = mem.alloc(B * O, sizeof(float)) # <<<<<<<<<<<<<< * * cpu_mean_pool(means, */ __pyx_t_2 = ((struct __pyx_vtabstruct_5cymem_5cymem_Pool *)__pyx_v_mem->__pyx_vtab)->alloc(__pyx_v_mem, (__pyx_v_B * __pyx_v_O), (sizeof(float))); if (unlikely(__pyx_t_2 == ((void *)NULL))) __PYX_ERR(0, 737, __pyx_L1_error) __pyx_v_means = ((float *)__pyx_t_2); /* "thinc/neural/ops.pyx":740 * * cpu_mean_pool(means, * &X[0, 0], &lengths[0], B, T, O) # <<<<<<<<<<<<<< * return cpu_floats_ptr2array(means, (B, O)) * */ __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_5 = -1; if (__pyx_t_3 < 0) { __pyx_t_3 += __pyx_v_X.shape[0]; if (unlikely(__pyx_t_3 < 0)) __pyx_t_5 = 0; } else if (unlikely(__pyx_t_3 >= __pyx_v_X.shape[0])) __pyx_t_5 = 0; if (__pyx_t_4 < 0) { __pyx_t_4 += __pyx_v_X.shape[1]; if (unlikely(__pyx_t_4 < 0)) __pyx_t_5 = 1; } else if (unlikely(__pyx_t_4 >= __pyx_v_X.shape[1])) __pyx_t_5 = 1; if (unlikely(__pyx_t_5 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_5); __PYX_ERR(0, 740, __pyx_L1_error) } __pyx_t_6 = 0; __pyx_t_5 = -1; if (__pyx_t_6 < 0) { __pyx_t_6 += __pyx_v_lengths.shape[0]; if (unlikely(__pyx_t_6 < 0)) __pyx_t_5 = 0; } else if (unlikely(__pyx_t_6 >= __pyx_v_lengths.shape[0])) __pyx_t_5 = 0; if (unlikely(__pyx_t_5 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_5); __PYX_ERR(0, 740, __pyx_L1_error) } /* "thinc/neural/ops.pyx":739 * means = mem.alloc(B * O, sizeof(float)) * * cpu_mean_pool(means, # <<<<<<<<<<<<<< * &X[0, 0], &lengths[0], B, T, O) * return cpu_floats_ptr2array(means, (B, O)) */ __pyx_f_5thinc_6neural_3ops_cpu_mean_pool(__pyx_v_means, (&(*((float const *) ( /* dim=1 */ ((char *) (((float const *) ( /* dim=0 */ (__pyx_v_X.data + __pyx_t_3 * __pyx_v_X.strides[0]) )) + __pyx_t_4)) )))), (&(*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_lengths.data) + __pyx_t_6)) )))), __pyx_v_B, __pyx_v_T, __pyx_v_O); /* "thinc/neural/ops.pyx":741 * cpu_mean_pool(means, * &X[0, 0], &lengths[0], B, T, O) * return cpu_floats_ptr2array(means, (B, O)) # <<<<<<<<<<<<<< * * def sum_pool(self, const float[:, ::1] X, int[::1] lengths): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 741, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_O); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 741, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 741, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_7); __pyx_t_1 = 0; __pyx_t_7 = 0; __pyx_t_7 = __pyx_f_5thinc_6neural_3ops_cpu_floats_ptr2array(__pyx_v_means, __pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 741, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":731 * return keys * * def mean_pool(self, const float[:, ::1] X, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.mean_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_mem); __PYX_XDEC_MEMVIEW(&__pyx_v_X, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_lengths, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":743 * return cpu_floats_ptr2array(means, (B, O)) * * def sum_pool(self, const float[:, ::1] X, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_45sum_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_45sum_pool = {"sum_pool", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_45sum_pool, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_45sum_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_lengths = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("sum_pool (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_lengths,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("sum_pool", 1, 3, 3, 1); __PYX_ERR(0, 743, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("sum_pool", 1, 3, 3, 2); __PYX_ERR(0, 743, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "sum_pool") < 0)) __PYX_ERR(0, 743, __pyx_L3_error) } } else if 
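/* mean_pool hands its scratch buffer to cpu_floats_ptr2array together with a
 * (B, O) shape tuple to get a float32 ndarray back.  That helper is defined
 * elsewhere in this module; the copy-based sketch below only illustrates the
 * general "C buffer to ndarray" idea with the public NumPy C API, and assumes
 * import_array() has already been called.  The names are hypothetical, and the
 * real helper may avoid the copy.
 *
 *     #include <Python.h>
 *     #include <numpy/arrayobject.h>
 *     #include <string.h>
 *
 *     // Copies B*O floats into a fresh (B, O) float32 array.
 *     static PyObject *floats_to_array_sketch(const float *data,
 *                                             npy_intp B, npy_intp O)
 *     {
 *         npy_intp dims[2] = {B, O};
 *         PyObject *arr = PyArray_SimpleNew(2, dims, NPY_FLOAT32);
 *         if (arr == NULL)
 *             return NULL;
 *         memcpy(PyArray_DATA((PyArrayObject *)arr), data,
 *                (size_t)(B * O) * sizeof(float));
 *         return arr;
 *     }
 */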
(PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 743, __pyx_L3_error) __pyx_v_lengths = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_lengths.memview)) __PYX_ERR(0, 743, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("sum_pool", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 743, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.sum_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_44sum_pool(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_44sum_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_lengths) { int __pyx_v_B; int __pyx_v_O; int __pyx_v_T; struct __pyx_obj_5cymem_5cymem_Pool *__pyx_v_mem = 0; float *__pyx_v_sums; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; void *__pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__81) __Pyx_RefNannySetupContext("sum_pool", 0); __Pyx_TraceCall("sum_pool", __pyx_f[0], 743, 0, __PYX_ERR(0, 743, __pyx_L1_error)); /* "thinc/neural/ops.pyx":744 * * def sum_pool(self, const float[:, ::1] X, int[::1] lengths): * cdef int B = lengths.shape[0] # <<<<<<<<<<<<<< * cdef int O = X.shape[1] * cdef int T = X.shape[0] */ __pyx_v_B = (__pyx_v_lengths.shape[0]); /* "thinc/neural/ops.pyx":745 * def sum_pool(self, const float[:, ::1] X, int[::1] lengths): * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] # <<<<<<<<<<<<<< * cdef int T = X.shape[0] * */ __pyx_v_O = (__pyx_v_X.shape[1]); /* "thinc/neural/ops.pyx":746 * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] * cdef int T = X.shape[0] # <<<<<<<<<<<<<< * * cdef Pool mem = Pool() */ __pyx_v_T = (__pyx_v_X.shape[0]); /* "thinc/neural/ops.pyx":748 * cdef int T = X.shape[0] * * cdef Pool mem = Pool() # <<<<<<<<<<<<<< * sums = mem.alloc(B * O, sizeof(float)) * */ __pyx_t_1 = __Pyx_PyObject_CallNoArg(((PyObject *)__pyx_ptype_5cymem_5cymem_Pool)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 748, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_mem = ((struct __pyx_obj_5cymem_5cymem_Pool *)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":749 * * cdef Pool mem = Pool() * sums = mem.alloc(B * O, sizeof(float)) # <<<<<<<<<<<<<< * * cpu_sum_pool(sums, */ __pyx_t_2 = ((struct __pyx_vtabstruct_5cymem_5cymem_Pool *)__pyx_v_mem->__pyx_vtab)->alloc(__pyx_v_mem, (__pyx_v_B * __pyx_v_O), (sizeof(float))); if (unlikely(__pyx_t_2 == ((void *)NULL))) __PYX_ERR(0, 749, __pyx_L1_error) __pyx_v_sums = ((float *)__pyx_t_2); /* "thinc/neural/ops.pyx":752 * * cpu_sum_pool(sums, * &X[0, 0], &lengths[0], B, T, O) # <<<<<<<<<<<<<< * return 
cpu_floats_ptr2array(sums, (B, O)) * */ __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_5 = -1; if (__pyx_t_3 < 0) { __pyx_t_3 += __pyx_v_X.shape[0]; if (unlikely(__pyx_t_3 < 0)) __pyx_t_5 = 0; } else if (unlikely(__pyx_t_3 >= __pyx_v_X.shape[0])) __pyx_t_5 = 0; if (__pyx_t_4 < 0) { __pyx_t_4 += __pyx_v_X.shape[1]; if (unlikely(__pyx_t_4 < 0)) __pyx_t_5 = 1; } else if (unlikely(__pyx_t_4 >= __pyx_v_X.shape[1])) __pyx_t_5 = 1; if (unlikely(__pyx_t_5 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_5); __PYX_ERR(0, 752, __pyx_L1_error) } __pyx_t_6 = 0; __pyx_t_5 = -1; if (__pyx_t_6 < 0) { __pyx_t_6 += __pyx_v_lengths.shape[0]; if (unlikely(__pyx_t_6 < 0)) __pyx_t_5 = 0; } else if (unlikely(__pyx_t_6 >= __pyx_v_lengths.shape[0])) __pyx_t_5 = 0; if (unlikely(__pyx_t_5 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_5); __PYX_ERR(0, 752, __pyx_L1_error) } /* "thinc/neural/ops.pyx":751 * sums = mem.alloc(B * O, sizeof(float)) * * cpu_sum_pool(sums, # <<<<<<<<<<<<<< * &X[0, 0], &lengths[0], B, T, O) * return cpu_floats_ptr2array(sums, (B, O)) */ __pyx_f_5thinc_6neural_3ops_cpu_sum_pool(__pyx_v_sums, (&(*((float const *) ( /* dim=1 */ ((char *) (((float const *) ( /* dim=0 */ (__pyx_v_X.data + __pyx_t_3 * __pyx_v_X.strides[0]) )) + __pyx_t_4)) )))), (&(*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_lengths.data) + __pyx_t_6)) )))), __pyx_v_B, __pyx_v_T, __pyx_v_O); /* "thinc/neural/ops.pyx":753 * cpu_sum_pool(sums, * &X[0, 0], &lengths[0], B, T, O) * return cpu_floats_ptr2array(sums, (B, O)) # <<<<<<<<<<<<<< * * def backprop_mean_pool(self, const float[:, ::1] d_means, int[::1] lengths): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 753, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_O); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 753, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 753, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_7); __pyx_t_1 = 0; __pyx_t_7 = 0; __pyx_t_7 = __pyx_f_5thinc_6neural_3ops_cpu_floats_ptr2array(__pyx_v_sums, __pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 753, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":743 * return cpu_floats_ptr2array(means, (B, O)) * * def sum_pool(self, const float[:, ::1] X, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.sum_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_mem); __PYX_XDEC_MEMVIEW(&__pyx_v_X, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_lengths, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":755 * return cpu_floats_ptr2array(sums, (B, O)) * * def backprop_mean_pool(self, const float[:, ::1] d_means, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = d_means.shape[1] */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_47backprop_mean_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef 
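/* sum_pool, compiled above, is the same segment walk as mean_pool without the
 * final divide; a reference version is just the accumulation loop, under the
 * same layout assumptions as the mean_pool sketch.
 *
 *     // sums: B x O (zeroed); X: T x O; lengths: B entries summing to T.
 *     static void ref_sum_pool(float *sums, const float *X,
 *                              const int *lengths, int B, int O)
 *     {
 *         int row = 0;
 *         for (int b = 0; b < B; b++)
 *             for (int t = 0; t < lengths[b]; t++, row++)
 *                 for (int j = 0; j < O; j++)
 *                     sums[b * O + j] += X[row * O + j];
 *     }
 */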
__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_47backprop_mean_pool = {"backprop_mean_pool", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_47backprop_mean_pool, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_47backprop_mean_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_d_means = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_lengths = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_mean_pool (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_d_means,&__pyx_n_s_lengths,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d_means)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_mean_pool", 1, 3, 3, 1); __PYX_ERR(0, 755, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_mean_pool", 1, 3, 3, 2); __PYX_ERR(0, 755, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_mean_pool") < 0)) __PYX_ERR(0, 755, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_d_means = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_d_means.memview)) __PYX_ERR(0, 755, __pyx_L3_error) __pyx_v_lengths = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_lengths.memview)) __PYX_ERR(0, 755, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_mean_pool", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 755, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_mean_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_46backprop_mean_pool(__pyx_self, __pyx_v_self, __pyx_v_d_means, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_46backprop_mean_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_d_means, __Pyx_memviewslice __pyx_v_lengths) { int __pyx_v_B; int __pyx_v_O; int 
__pyx_v_T; PyObject *__pyx_v_length = NULL; struct __pyx_obj_5cymem_5cymem_Pool *__pyx_v_mem = 0; float *__pyx_v_dX; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; void *__pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__82) __Pyx_RefNannySetupContext("backprop_mean_pool", 0); __Pyx_TraceCall("backprop_mean_pool", __pyx_f[0], 755, 0, __PYX_ERR(0, 755, __pyx_L1_error)); /* "thinc/neural/ops.pyx":756 * * def backprop_mean_pool(self, const float[:, ::1] d_means, int[::1] lengths): * cdef int B = lengths.shape[0] # <<<<<<<<<<<<<< * cdef int O = d_means.shape[1] * cdef int T = 0 */ __pyx_v_B = (__pyx_v_lengths.shape[0]); /* "thinc/neural/ops.pyx":757 * def backprop_mean_pool(self, const float[:, ::1] d_means, int[::1] lengths): * cdef int B = lengths.shape[0] * cdef int O = d_means.shape[1] # <<<<<<<<<<<<<< * cdef int T = 0 * for length in lengths[:B]: */ __pyx_v_O = (__pyx_v_d_means.shape[1]); /* "thinc/neural/ops.pyx":758 * cdef int B = lengths.shape[0] * cdef int O = d_means.shape[1] * cdef int T = 0 # <<<<<<<<<<<<<< * for length in lengths[:B]: * T += length */ __pyx_v_T = 0; /* "thinc/neural/ops.pyx":759 * cdef int O = d_means.shape[1] * cdef int T = 0 * for length in lengths[:B]: # <<<<<<<<<<<<<< * T += length * cdef Pool mem = Pool() */ __pyx_t_1.data = __pyx_v_lengths.data; __pyx_t_1.memview = __pyx_v_lengths.memview; __PYX_INC_MEMVIEW(&__pyx_t_1, 0); __pyx_t_2 = -1; if (unlikely(__pyx_memoryview_slice_memviewslice( &__pyx_t_1, __pyx_v_lengths.shape[0], __pyx_v_lengths.strides[0], __pyx_v_lengths.suboffsets[0], 0, 0, &__pyx_t_2, 0, __pyx_v_B, 0, 0, 1, 0, 1) < 0)) { __PYX_ERR(0, 759, __pyx_L1_error) } __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_t_1, 1, (PyObject *(*)(char *)) __pyx_memview_get_int, (int (*)(char *, PyObject *)) __pyx_memview_set_int, 0);; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __PYX_XDEC_MEMVIEW(&__pyx_t_1, 1); __pyx_t_1.memview = NULL; __pyx_t_1.data = NULL; if (likely(PyList_CheckExact(__pyx_t_3)) || PyTuple_CheckExact(__pyx_t_3)) { __pyx_t_4 = __pyx_t_3; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 759, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(0, 759, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(0, 
759, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif } } else { __pyx_t_3 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_3)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 759, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_3); } __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":760 * cdef int T = 0 * for length in lengths[:B]: * T += length # <<<<<<<<<<<<<< * cdef Pool mem = Pool() * dX = mem.alloc(T * O, sizeof(float)) */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_T); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_7 = PyNumber_InPlaceAdd(__pyx_t_3, __pyx_v_length); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_7); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 760, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_v_T = __pyx_t_2; /* "thinc/neural/ops.pyx":759 * cdef int O = d_means.shape[1] * cdef int T = 0 * for length in lengths[:B]: # <<<<<<<<<<<<<< * T += length * cdef Pool mem = Pool() */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":761 * for length in lengths[:B]: * T += length * cdef Pool mem = Pool() # <<<<<<<<<<<<<< * dX = mem.alloc(T * O, sizeof(float)) * */ __pyx_t_4 = __Pyx_PyObject_CallNoArg(((PyObject *)__pyx_ptype_5cymem_5cymem_Pool)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_mem = ((struct __pyx_obj_5cymem_5cymem_Pool *)__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":762 * T += length * cdef Pool mem = Pool() * dX = mem.alloc(T * O, sizeof(float)) # <<<<<<<<<<<<<< * * cpu_backprop_mean_pool(dX, */ __pyx_t_8 = ((struct __pyx_vtabstruct_5cymem_5cymem_Pool *)__pyx_v_mem->__pyx_vtab)->alloc(__pyx_v_mem, (__pyx_v_T * __pyx_v_O), (sizeof(float))); if (unlikely(__pyx_t_8 == ((void *)NULL))) __PYX_ERR(0, 762, __pyx_L1_error) __pyx_v_dX = ((float *)__pyx_t_8); /* "thinc/neural/ops.pyx":765 * * cpu_backprop_mean_pool(dX, * &d_means[0,0], &lengths[0], B, T, O) # <<<<<<<<<<<<<< * * return cpu_floats_ptr2array(dX, (T, O)) */ __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_2 = -1; if (__pyx_t_9 < 0) { __pyx_t_9 += __pyx_v_d_means.shape[0]; if (unlikely(__pyx_t_9 < 0)) __pyx_t_2 = 0; } else if (unlikely(__pyx_t_9 >= __pyx_v_d_means.shape[0])) __pyx_t_2 = 0; if (__pyx_t_10 < 0) { __pyx_t_10 += __pyx_v_d_means.shape[1]; if (unlikely(__pyx_t_10 < 0)) __pyx_t_2 = 1; } else if (unlikely(__pyx_t_10 >= __pyx_v_d_means.shape[1])) __pyx_t_2 = 1; if (unlikely(__pyx_t_2 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_2); __PYX_ERR(0, 765, __pyx_L1_error) } __pyx_t_11 = 0; __pyx_t_2 = -1; if (__pyx_t_11 < 0) { __pyx_t_11 += __pyx_v_lengths.shape[0]; if (unlikely(__pyx_t_11 < 0)) __pyx_t_2 = 0; } else if (unlikely(__pyx_t_11 >= __pyx_v_lengths.shape[0])) __pyx_t_2 = 0; if (unlikely(__pyx_t_2 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_2); __PYX_ERR(0, 765, __pyx_L1_error) } /* "thinc/neural/ops.pyx":764 * dX = mem.alloc(T * O, sizeof(float)) * * cpu_backprop_mean_pool(dX, # <<<<<<<<<<<<<< * &d_means[0,0], &lengths[0], B, T, O) * */ __pyx_f_5thinc_6neural_3ops_cpu_backprop_mean_pool(__pyx_v_dX, (&(*((float const *) ( /* dim=1 */ ((char *) (((float const 
*) ( /* dim=0 */ (__pyx_v_d_means.data + __pyx_t_9 * __pyx_v_d_means.strides[0]) )) + __pyx_t_10)) )))), (&(*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_lengths.data) + __pyx_t_11)) )))), __pyx_v_B, __pyx_v_T, __pyx_v_O); /* "thinc/neural/ops.pyx":767 * &d_means[0,0], &lengths[0], B, T, O) * * return cpu_floats_ptr2array(dX, (T, O)) # <<<<<<<<<<<<<< * * def backprop_sum_pool(self, const float[:, ::1] d_sums, int[::1] lengths): */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_T); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 767, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_O); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 767, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 767, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_7); __pyx_t_4 = 0; __pyx_t_7 = 0; __pyx_t_7 = __pyx_f_5thinc_6neural_3ops_cpu_floats_ptr2array(__pyx_v_dX, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 767, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":755 * return cpu_floats_ptr2array(sums, (B, O)) * * def backprop_mean_pool(self, const float[:, ::1] d_means, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = d_means.shape[1] */ /* function exit code */ __pyx_L1_error:; __PYX_XDEC_MEMVIEW(&__pyx_t_1, 1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_mean_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_length); __Pyx_XDECREF((PyObject *)__pyx_v_mem); __PYX_XDEC_MEMVIEW(&__pyx_v_d_means, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_lengths, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":769 * return cpu_floats_ptr2array(dX, (T, O)) * * def backprop_sum_pool(self, const float[:, ::1] d_sums, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = d_sums.shape[1] */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_49backprop_sum_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_49backprop_sum_pool = {"backprop_sum_pool", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_49backprop_sum_pool, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_49backprop_sum_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_d_sums = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_lengths = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_sum_pool (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_d_sums,&__pyx_n_s_lengths,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 
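/* backprop_mean_pool, compiled above, first recovers T by summing lengths and
 * then spreads each row of d_means back over its segment scaled by 1/length,
 * which is the gradient of mean pooling.  The sketch assumes the same flat
 * layout as the earlier pooling sketches and is not the actual
 * cpu_backprop_mean_pool helper.
 *
 *     // dX: T x O; d_means: B x O; lengths: B entries summing to T.
 *     static void ref_backprop_mean_pool(float *dX, const float *d_means,
 *                                        const int *lengths, int B, int O)
 *     {
 *         int row = 0;
 *         for (int b = 0; b < B; b++) {
 *             float inv_len = 1.0f / (float)lengths[b];   // assumes lengths[b] > 0
 *             for (int t = 0; t < lengths[b]; t++, row++)
 *                 for (int j = 0; j < O; j++)
 *                     dX[row * O + j] = d_means[b * O + j] * inv_len;
 *         }
 *     }
 */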
2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d_sums)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_sum_pool", 1, 3, 3, 1); __PYX_ERR(0, 769, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_sum_pool", 1, 3, 3, 2); __PYX_ERR(0, 769, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_sum_pool") < 0)) __PYX_ERR(0, 769, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_d_sums = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_d_sums.memview)) __PYX_ERR(0, 769, __pyx_L3_error) __pyx_v_lengths = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_lengths.memview)) __PYX_ERR(0, 769, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_sum_pool", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 769, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_sum_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_48backprop_sum_pool(__pyx_self, __pyx_v_self, __pyx_v_d_sums, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_48backprop_sum_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_d_sums, __Pyx_memviewslice __pyx_v_lengths) { int __pyx_v_B; int __pyx_v_O; int __pyx_v_T; PyObject *__pyx_v_length = NULL; struct __pyx_obj_5cymem_5cymem_Pool *__pyx_v_mem = 0; float *__pyx_v_dX; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; void *__pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__83) __Pyx_RefNannySetupContext("backprop_sum_pool", 0); __Pyx_TraceCall("backprop_sum_pool", __pyx_f[0], 769, 0, __PYX_ERR(0, 769, __pyx_L1_error)); /* "thinc/neural/ops.pyx":770 * * def backprop_sum_pool(self, const float[:, ::1] d_sums, int[::1] lengths): * cdef int B = lengths.shape[0] # <<<<<<<<<<<<<< * cdef int O = d_sums.shape[1] * cdef int T = 0 */ __pyx_v_B = (__pyx_v_lengths.shape[0]); /* "thinc/neural/ops.pyx":771 * def backprop_sum_pool(self, const float[:, ::1] 
d_sums, int[::1] lengths): * cdef int B = lengths.shape[0] * cdef int O = d_sums.shape[1] # <<<<<<<<<<<<<< * cdef int T = 0 * for length in lengths[:B]: */ __pyx_v_O = (__pyx_v_d_sums.shape[1]); /* "thinc/neural/ops.pyx":772 * cdef int B = lengths.shape[0] * cdef int O = d_sums.shape[1] * cdef int T = 0 # <<<<<<<<<<<<<< * for length in lengths[:B]: * T += length */ __pyx_v_T = 0; /* "thinc/neural/ops.pyx":773 * cdef int O = d_sums.shape[1] * cdef int T = 0 * for length in lengths[:B]: # <<<<<<<<<<<<<< * T += length * cdef Pool mem = Pool() */ __pyx_t_1.data = __pyx_v_lengths.data; __pyx_t_1.memview = __pyx_v_lengths.memview; __PYX_INC_MEMVIEW(&__pyx_t_1, 0); __pyx_t_2 = -1; if (unlikely(__pyx_memoryview_slice_memviewslice( &__pyx_t_1, __pyx_v_lengths.shape[0], __pyx_v_lengths.strides[0], __pyx_v_lengths.suboffsets[0], 0, 0, &__pyx_t_2, 0, __pyx_v_B, 0, 0, 1, 0, 1) < 0)) { __PYX_ERR(0, 773, __pyx_L1_error) } __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_t_1, 1, (PyObject *(*)(char *)) __pyx_memview_get_int, (int (*)(char *, PyObject *)) __pyx_memview_set_int, 0);; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 773, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __PYX_XDEC_MEMVIEW(&__pyx_t_1, 1); __pyx_t_1.memview = NULL; __pyx_t_1.data = NULL; if (likely(PyList_CheckExact(__pyx_t_3)) || PyTuple_CheckExact(__pyx_t_3)) { __pyx_t_4 = __pyx_t_3; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 773, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 773, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(0, 773, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 773, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(0, 773, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 773, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif } } else { __pyx_t_3 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_3)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 773, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_3); } __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":774 * cdef int T = 0 * for length in lengths[:B]: * T += length # <<<<<<<<<<<<<< * cdef Pool mem = Pool() * dX = mem.alloc(T * O, sizeof(float)) */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_T); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_7 = PyNumber_InPlaceAdd(__pyx_t_3, __pyx_v_length); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_7); if (unlikely((__pyx_t_2 == 
(int)-1) && PyErr_Occurred())) __PYX_ERR(0, 774, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_v_T = __pyx_t_2; /* "thinc/neural/ops.pyx":773 * cdef int O = d_sums.shape[1] * cdef int T = 0 * for length in lengths[:B]: # <<<<<<<<<<<<<< * T += length * cdef Pool mem = Pool() */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":775 * for length in lengths[:B]: * T += length * cdef Pool mem = Pool() # <<<<<<<<<<<<<< * dX = mem.alloc(T * O, sizeof(float)) * */ __pyx_t_4 = __Pyx_PyObject_CallNoArg(((PyObject *)__pyx_ptype_5cymem_5cymem_Pool)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 775, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_mem = ((struct __pyx_obj_5cymem_5cymem_Pool *)__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":776 * T += length * cdef Pool mem = Pool() * dX = mem.alloc(T * O, sizeof(float)) # <<<<<<<<<<<<<< * * cpu_backprop_sum_pool(dX, */ __pyx_t_8 = ((struct __pyx_vtabstruct_5cymem_5cymem_Pool *)__pyx_v_mem->__pyx_vtab)->alloc(__pyx_v_mem, (__pyx_v_T * __pyx_v_O), (sizeof(float))); if (unlikely(__pyx_t_8 == ((void *)NULL))) __PYX_ERR(0, 776, __pyx_L1_error) __pyx_v_dX = ((float *)__pyx_t_8); /* "thinc/neural/ops.pyx":779 * * cpu_backprop_sum_pool(dX, * &d_sums[0,0], &lengths[0], B, T, O) # <<<<<<<<<<<<<< * return cpu_floats_ptr2array(dX, (T, O)) * */ __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_2 = -1; if (__pyx_t_9 < 0) { __pyx_t_9 += __pyx_v_d_sums.shape[0]; if (unlikely(__pyx_t_9 < 0)) __pyx_t_2 = 0; } else if (unlikely(__pyx_t_9 >= __pyx_v_d_sums.shape[0])) __pyx_t_2 = 0; if (__pyx_t_10 < 0) { __pyx_t_10 += __pyx_v_d_sums.shape[1]; if (unlikely(__pyx_t_10 < 0)) __pyx_t_2 = 1; } else if (unlikely(__pyx_t_10 >= __pyx_v_d_sums.shape[1])) __pyx_t_2 = 1; if (unlikely(__pyx_t_2 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_2); __PYX_ERR(0, 779, __pyx_L1_error) } __pyx_t_11 = 0; __pyx_t_2 = -1; if (__pyx_t_11 < 0) { __pyx_t_11 += __pyx_v_lengths.shape[0]; if (unlikely(__pyx_t_11 < 0)) __pyx_t_2 = 0; } else if (unlikely(__pyx_t_11 >= __pyx_v_lengths.shape[0])) __pyx_t_2 = 0; if (unlikely(__pyx_t_2 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_2); __PYX_ERR(0, 779, __pyx_L1_error) } /* "thinc/neural/ops.pyx":778 * dX = mem.alloc(T * O, sizeof(float)) * * cpu_backprop_sum_pool(dX, # <<<<<<<<<<<<<< * &d_sums[0,0], &lengths[0], B, T, O) * return cpu_floats_ptr2array(dX, (T, O)) */ __pyx_f_5thinc_6neural_3ops_cpu_backprop_sum_pool(__pyx_v_dX, (&(*((float const *) ( /* dim=1 */ ((char *) (((float const *) ( /* dim=0 */ (__pyx_v_d_sums.data + __pyx_t_9 * __pyx_v_d_sums.strides[0]) )) + __pyx_t_10)) )))), (&(*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_lengths.data) + __pyx_t_11)) )))), __pyx_v_B, __pyx_v_T, __pyx_v_O); /* "thinc/neural/ops.pyx":780 * cpu_backprop_sum_pool(dX, * &d_sums[0,0], &lengths[0], B, T, O) * return cpu_floats_ptr2array(dX, (T, O)) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_T); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 780, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_O); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 780, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 780, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_7); __pyx_t_4 = 0; __pyx_t_7 = 0; __pyx_t_7 = __pyx_f_5thinc_6neural_3ops_cpu_floats_ptr2array(__pyx_v_dX, __pyx_t_3); if (unlikely(!__pyx_t_7)) 
__PYX_ERR(0, 780, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":769 * return cpu_floats_ptr2array(dX, (T, O)) * * def backprop_sum_pool(self, const float[:, ::1] d_sums, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = d_sums.shape[1] */ /* function exit code */ __pyx_L1_error:; __PYX_XDEC_MEMVIEW(&__pyx_t_1, 1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_sum_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_length); __Pyx_XDECREF((PyObject *)__pyx_v_mem); __PYX_XDEC_MEMVIEW(&__pyx_v_d_sums, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_lengths, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":783 * * * def max_pool(self, const float[:, ::1] X, const int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_51max_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_51max_pool = {"max_pool", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_51max_pool, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_51max_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_lengths = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("max_pool (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_lengths,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("max_pool", 1, 3, 3, 1); __PYX_ERR(0, 783, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("max_pool", 1, 3, 3, 2); __PYX_ERR(0, 783, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "max_pool") < 0)) __PYX_ERR(0, 783, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self 
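/* backprop_sum_pool, compiled above, is the unscaled counterpart: every row of
 * the recovered (T, O) gradient is a straight copy of its segment's d_sums
 * row.  Same layout assumptions as the earlier sketches.
 *
 *     // dX: T x O; d_sums: B x O; lengths: B entries summing to T.
 *     static void ref_backprop_sum_pool(float *dX, const float *d_sums,
 *                                       const int *lengths, int B, int O)
 *     {
 *         int row = 0;
 *         for (int b = 0; b < B; b++)
 *             for (int t = 0; t < lengths[b]; t++, row++)
 *                 for (int j = 0; j < O; j++)
 *                     dX[row * O + j] = d_sums[b * O + j];
 *     }
 */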
= values[0]; __pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 783, __pyx_L3_error) __pyx_v_lengths = __Pyx_PyObject_to_MemoryviewSlice_dc_int__const__(values[2], 0); if (unlikely(!__pyx_v_lengths.memview)) __PYX_ERR(0, 783, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("max_pool", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 783, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.max_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_50max_pool(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_50max_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_lengths) { int __pyx_v_B; int __pyx_v_O; int __pyx_v_T; struct __pyx_obj_5cymem_5cymem_Pool *__pyx_v_mem = 0; float *__pyx_v_maxes; int *__pyx_v_which; PyArrayObject *__pyx_v_py_best = 0; PyArrayObject *__pyx_v_py_which = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; void *__pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__84) __Pyx_RefNannySetupContext("max_pool", 0); __Pyx_TraceCall("max_pool", __pyx_f[0], 783, 0, __PYX_ERR(0, 783, __pyx_L1_error)); /* "thinc/neural/ops.pyx":784 * * def max_pool(self, const float[:, ::1] X, const int[::1] lengths): * cdef int B = lengths.shape[0] # <<<<<<<<<<<<<< * cdef int O = X.shape[1] * cdef int T = X.shape[0] */ __pyx_v_B = (__pyx_v_lengths.shape[0]); /* "thinc/neural/ops.pyx":785 * def max_pool(self, const float[:, ::1] X, const int[::1] lengths): * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] # <<<<<<<<<<<<<< * cdef int T = X.shape[0] * */ __pyx_v_O = (__pyx_v_X.shape[1]); /* "thinc/neural/ops.pyx":786 * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] * cdef int T = X.shape[0] # <<<<<<<<<<<<<< * * cdef Pool mem = Pool() */ __pyx_v_T = (__pyx_v_X.shape[0]); /* "thinc/neural/ops.pyx":788 * cdef int T = X.shape[0] * * cdef Pool mem = Pool() # <<<<<<<<<<<<<< * maxes = mem.alloc(B * O, sizeof(float)) * which = mem.alloc(B * O, sizeof(int)) */ __pyx_t_1 = __Pyx_PyObject_CallNoArg(((PyObject *)__pyx_ptype_5cymem_5cymem_Pool)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 788, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_mem = ((struct __pyx_obj_5cymem_5cymem_Pool *)__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":789 * * cdef Pool mem = Pool() * maxes = mem.alloc(B * O, sizeof(float)) # <<<<<<<<<<<<<< * which = mem.alloc(B * O, sizeof(int)) * */ __pyx_t_2 = ((struct __pyx_vtabstruct_5cymem_5cymem_Pool *)__pyx_v_mem->__pyx_vtab)->alloc(__pyx_v_mem, (__pyx_v_B * __pyx_v_O), (sizeof(float))); if (unlikely(__pyx_t_2 == ((void *)NULL))) __PYX_ERR(0, 789, __pyx_L1_error) __pyx_v_maxes = ((float *)__pyx_t_2); /* "thinc/neural/ops.pyx":790 * cdef Pool mem = Pool() * maxes = mem.alloc(B * O, sizeof(float)) * which = mem.alloc(B * O, sizeof(int)) # <<<<<<<<<<<<<< * * cpu_max_pool(maxes, which, */ __pyx_t_2 = 
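/* The wrapper just compiled is the generated METH_VARARGS|METH_KEYWORDS entry
 * point for max_pool: it accepts self, X and lengths positionally or by
 * keyword, converts them, and forwards to the implementation function.  A
 * hand-written CPython equivalent of that parsing step could look like the
 * hypothetical sketch below (names invented; the conversion to typed buffers
 * that the generated code performs is elided).
 *
 *     #include <Python.h>
 *
 *     static PyObject *max_pool_wrapper_sketch(PyObject *module,
 *                                              PyObject *args, PyObject *kwds)
 *     {
 *         static char *kwlist[] = {"self", "X", "lengths", NULL};
 *         PyObject *self_obj, *X, *lengths;
 *         if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOO", kwlist,
 *                                          &self_obj, &X, &lengths))
 *             return NULL;
 *         // ... convert X and lengths, then call the implementation ...
 *         (void)module; (void)self_obj; (void)X; (void)lengths;
 *         Py_RETURN_NONE;
 *     }
 */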
((struct __pyx_vtabstruct_5cymem_5cymem_Pool *)__pyx_v_mem->__pyx_vtab)->alloc(__pyx_v_mem, (__pyx_v_B * __pyx_v_O), (sizeof(int))); if (unlikely(__pyx_t_2 == ((void *)NULL))) __PYX_ERR(0, 790, __pyx_L1_error) __pyx_v_which = ((int *)__pyx_t_2); /* "thinc/neural/ops.pyx":793 * * cpu_max_pool(maxes, which, * &X[0, 0], &lengths[0], B, T, O) # <<<<<<<<<<<<<< * * cdef ndarray py_best = cpu_floats_ptr2array(maxes, (B, O)) */ __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_5 = -1; if (__pyx_t_3 < 0) { __pyx_t_3 += __pyx_v_X.shape[0]; if (unlikely(__pyx_t_3 < 0)) __pyx_t_5 = 0; } else if (unlikely(__pyx_t_3 >= __pyx_v_X.shape[0])) __pyx_t_5 = 0; if (__pyx_t_4 < 0) { __pyx_t_4 += __pyx_v_X.shape[1]; if (unlikely(__pyx_t_4 < 0)) __pyx_t_5 = 1; } else if (unlikely(__pyx_t_4 >= __pyx_v_X.shape[1])) __pyx_t_5 = 1; if (unlikely(__pyx_t_5 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_5); __PYX_ERR(0, 793, __pyx_L1_error) } __pyx_t_6 = 0; __pyx_t_5 = -1; if (__pyx_t_6 < 0) { __pyx_t_6 += __pyx_v_lengths.shape[0]; if (unlikely(__pyx_t_6 < 0)) __pyx_t_5 = 0; } else if (unlikely(__pyx_t_6 >= __pyx_v_lengths.shape[0])) __pyx_t_5 = 0; if (unlikely(__pyx_t_5 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_5); __PYX_ERR(0, 793, __pyx_L1_error) } /* "thinc/neural/ops.pyx":792 * which = mem.alloc(B * O, sizeof(int)) * * cpu_max_pool(maxes, which, # <<<<<<<<<<<<<< * &X[0, 0], &lengths[0], B, T, O) * */ __pyx_f_5thinc_6neural_3ops_cpu_max_pool(__pyx_v_maxes, __pyx_v_which, (&(*((float const *) ( /* dim=1 */ ((char *) (((float const *) ( /* dim=0 */ (__pyx_v_X.data + __pyx_t_3 * __pyx_v_X.strides[0]) )) + __pyx_t_4)) )))), (&(*((int const *) ( /* dim=0 */ ((char *) (((int const *) __pyx_v_lengths.data) + __pyx_t_6)) )))), __pyx_v_B, __pyx_v_T, __pyx_v_O); /* "thinc/neural/ops.pyx":795 * &X[0, 0], &lengths[0], B, T, O) * * cdef ndarray py_best = cpu_floats_ptr2array(maxes, (B, O)) # <<<<<<<<<<<<<< * cdef ndarray py_which = cpu_ints_ptr2array(which, (B, O)) * return py_best, py_which */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_O); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_7); __pyx_t_1 = 0; __pyx_t_7 = 0; __pyx_t_7 = __pyx_f_5thinc_6neural_3ops_cpu_floats_ptr2array(__pyx_v_maxes, __pyx_t_8); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (!(likely(((__pyx_t_7) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_7, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 795, __pyx_L1_error) __pyx_v_py_best = ((PyArrayObject *)__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":796 * * cdef ndarray py_best = cpu_floats_ptr2array(maxes, (B, O)) * cdef ndarray py_which = cpu_ints_ptr2array(which, (B, O)) # <<<<<<<<<<<<<< * return py_best, py_which * */ __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_B); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = __Pyx_PyInt_From_int(__pyx_v_O); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_7); 
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_8); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_8 = __pyx_f_5thinc_6neural_3ops_cpu_ints_ptr2array(__pyx_v_which, __pyx_t_1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_8) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_8, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 796, __pyx_L1_error) __pyx_v_py_which = ((PyArrayObject *)__pyx_t_8); __pyx_t_8 = 0; /* "thinc/neural/ops.pyx":797 * cdef ndarray py_best = cpu_floats_ptr2array(maxes, (B, O)) * cdef ndarray py_which = cpu_ints_ptr2array(which, (B, O)) * return py_best, py_which # <<<<<<<<<<<<<< * * def backprop_max_pool(self, const float[:, ::1] d_maxes, */ __Pyx_XDECREF(__pyx_r); __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(((PyObject *)__pyx_v_py_best)); __Pyx_GIVEREF(((PyObject *)__pyx_v_py_best)); PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_v_py_best)); __Pyx_INCREF(((PyObject *)__pyx_v_py_which)); __Pyx_GIVEREF(((PyObject *)__pyx_v_py_which)); PyTuple_SET_ITEM(__pyx_t_8, 1, ((PyObject *)__pyx_v_py_which)); __pyx_r = __pyx_t_8; __pyx_t_8 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":783 * * * def max_pool(self, const float[:, ::1] X, const int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.max_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_mem); __Pyx_XDECREF((PyObject *)__pyx_v_py_best); __Pyx_XDECREF((PyObject *)__pyx_v_py_which); __PYX_XDEC_MEMVIEW(&__pyx_v_X, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_lengths, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":799 * return py_best, py_which * * def backprop_max_pool(self, const float[:, ::1] d_maxes, # <<<<<<<<<<<<<< * const int[:, ::1] which, const int[::1] lengths): * cdef int B = lengths.shape[0] */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_53backprop_max_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_53backprop_max_pool = {"backprop_max_pool", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_53backprop_max_pool, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_53backprop_max_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_d_maxes = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_which = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_lengths = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_max_pool (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_d_maxes,&__pyx_n_s_which,&__pyx_n_s_lengths,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 
4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d_maxes)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_max_pool", 1, 4, 4, 1); __PYX_ERR(0, 799, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_which)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_max_pool", 1, 4, 4, 2); __PYX_ERR(0, 799, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_max_pool", 1, 4, 4, 3); __PYX_ERR(0, 799, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_max_pool") < 0)) __PYX_ERR(0, 799, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_self = values[0]; __pyx_v_d_maxes = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(values[1], 0); if (unlikely(!__pyx_v_d_maxes.memview)) __PYX_ERR(0, 799, __pyx_L3_error) __pyx_v_which = __Pyx_PyObject_to_MemoryviewSlice_d_dc_int__const__(values[2], 0); if (unlikely(!__pyx_v_which.memview)) __PYX_ERR(0, 800, __pyx_L3_error) __pyx_v_lengths = __Pyx_PyObject_to_MemoryviewSlice_dc_int__const__(values[3], 0); if (unlikely(!__pyx_v_lengths.memview)) __PYX_ERR(0, 800, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_max_pool", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 799, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_max_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_52backprop_max_pool(__pyx_self, __pyx_v_self, __pyx_v_d_maxes, __pyx_v_which, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_52backprop_max_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_d_maxes, __Pyx_memviewslice __pyx_v_which, __Pyx_memviewslice __pyx_v_lengths) { int __pyx_v_B; int __pyx_v_O; int __pyx_v_T; PyObject *__pyx_v_length = NULL; struct __pyx_obj_5cymem_5cymem_Pool *__pyx_v_mem = 0; float *__pyx_v_dX; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; void *__pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t 
__pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__85) __Pyx_RefNannySetupContext("backprop_max_pool", 0); __Pyx_TraceCall("backprop_max_pool", __pyx_f[0], 799, 0, __PYX_ERR(0, 799, __pyx_L1_error)); /* "thinc/neural/ops.pyx":801 * def backprop_max_pool(self, const float[:, ::1] d_maxes, * const int[:, ::1] which, const int[::1] lengths): * cdef int B = lengths.shape[0] # <<<<<<<<<<<<<< * cdef int O = d_maxes.shape[1] * cdef int T = 0 */ __pyx_v_B = (__pyx_v_lengths.shape[0]); /* "thinc/neural/ops.pyx":802 * const int[:, ::1] which, const int[::1] lengths): * cdef int B = lengths.shape[0] * cdef int O = d_maxes.shape[1] # <<<<<<<<<<<<<< * cdef int T = 0 * for length in lengths[:B]: */ __pyx_v_O = (__pyx_v_d_maxes.shape[1]); /* "thinc/neural/ops.pyx":803 * cdef int B = lengths.shape[0] * cdef int O = d_maxes.shape[1] * cdef int T = 0 # <<<<<<<<<<<<<< * for length in lengths[:B]: * T += length */ __pyx_v_T = 0; /* "thinc/neural/ops.pyx":804 * cdef int O = d_maxes.shape[1] * cdef int T = 0 * for length in lengths[:B]: # <<<<<<<<<<<<<< * T += length * cdef Pool mem = Pool() */ __pyx_t_1.data = __pyx_v_lengths.data; __pyx_t_1.memview = __pyx_v_lengths.memview; __PYX_INC_MEMVIEW(&__pyx_t_1, 0); __pyx_t_2 = -1; if (unlikely(__pyx_memoryview_slice_memviewslice( &__pyx_t_1, __pyx_v_lengths.shape[0], __pyx_v_lengths.strides[0], __pyx_v_lengths.suboffsets[0], 0, 0, &__pyx_t_2, 0, __pyx_v_B, 0, 0, 1, 0, 1) < 0)) { __PYX_ERR(0, 804, __pyx_L1_error) } __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_t_1, 1, (PyObject *(*)(char *)) __pyx_memview_get_int__const__, (int (*)(char *, PyObject *)) NULL, 0);; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __PYX_XDEC_MEMVIEW(&__pyx_t_1, 1); __pyx_t_1.memview = NULL; __pyx_t_1.data = NULL; if (likely(PyList_CheckExact(__pyx_t_3)) || PyTuple_CheckExact(__pyx_t_3)) { __pyx_t_4 = __pyx_t_3; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 804, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(0, 804, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(0, 804, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif } } else { __pyx_t_3 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_3)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 
804, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_3); } __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":805 * cdef int T = 0 * for length in lengths[:B]: * T += length # <<<<<<<<<<<<<< * cdef Pool mem = Pool() * dX = mem.alloc(T * O, sizeof(float)) */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_T); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 805, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_7 = PyNumber_InPlaceAdd(__pyx_t_3, __pyx_v_length); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 805, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_7); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 805, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_v_T = __pyx_t_2; /* "thinc/neural/ops.pyx":804 * cdef int O = d_maxes.shape[1] * cdef int T = 0 * for length in lengths[:B]: # <<<<<<<<<<<<<< * T += length * cdef Pool mem = Pool() */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":806 * for length in lengths[:B]: * T += length * cdef Pool mem = Pool() # <<<<<<<<<<<<<< * dX = mem.alloc(T * O, sizeof(float)) * */ __pyx_t_4 = __Pyx_PyObject_CallNoArg(((PyObject *)__pyx_ptype_5cymem_5cymem_Pool)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 806, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_mem = ((struct __pyx_obj_5cymem_5cymem_Pool *)__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":807 * T += length * cdef Pool mem = Pool() * dX = mem.alloc(T * O, sizeof(float)) # <<<<<<<<<<<<<< * * cpu_backprop_max_pool(dX, */ __pyx_t_8 = ((struct __pyx_vtabstruct_5cymem_5cymem_Pool *)__pyx_v_mem->__pyx_vtab)->alloc(__pyx_v_mem, (__pyx_v_T * __pyx_v_O), (sizeof(float))); if (unlikely(__pyx_t_8 == ((void *)NULL))) __PYX_ERR(0, 807, __pyx_L1_error) __pyx_v_dX = ((float *)__pyx_t_8); /* "thinc/neural/ops.pyx":810 * * cpu_backprop_max_pool(dX, * &d_maxes[0,0], &which[0, 0], &lengths[0], B, T, O) # <<<<<<<<<<<<<< * * return cpu_floats_ptr2array(dX, (T, O)) */ __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_2 = -1; if (__pyx_t_9 < 0) { __pyx_t_9 += __pyx_v_d_maxes.shape[0]; if (unlikely(__pyx_t_9 < 0)) __pyx_t_2 = 0; } else if (unlikely(__pyx_t_9 >= __pyx_v_d_maxes.shape[0])) __pyx_t_2 = 0; if (__pyx_t_10 < 0) { __pyx_t_10 += __pyx_v_d_maxes.shape[1]; if (unlikely(__pyx_t_10 < 0)) __pyx_t_2 = 1; } else if (unlikely(__pyx_t_10 >= __pyx_v_d_maxes.shape[1])) __pyx_t_2 = 1; if (unlikely(__pyx_t_2 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_2); __PYX_ERR(0, 810, __pyx_L1_error) } __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_2 = -1; if (__pyx_t_11 < 0) { __pyx_t_11 += __pyx_v_which.shape[0]; if (unlikely(__pyx_t_11 < 0)) __pyx_t_2 = 0; } else if (unlikely(__pyx_t_11 >= __pyx_v_which.shape[0])) __pyx_t_2 = 0; if (__pyx_t_12 < 0) { __pyx_t_12 += __pyx_v_which.shape[1]; if (unlikely(__pyx_t_12 < 0)) __pyx_t_2 = 1; } else if (unlikely(__pyx_t_12 >= __pyx_v_which.shape[1])) __pyx_t_2 = 1; if (unlikely(__pyx_t_2 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_2); __PYX_ERR(0, 810, __pyx_L1_error) } __pyx_t_13 = 0; __pyx_t_2 = -1; if (__pyx_t_13 < 0) { __pyx_t_13 += __pyx_v_lengths.shape[0]; if (unlikely(__pyx_t_13 < 0)) __pyx_t_2 = 0; } else if (unlikely(__pyx_t_13 >= __pyx_v_lengths.shape[0])) __pyx_t_2 = 0; if (unlikely(__pyx_t_2 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_2); __PYX_ERR(0, 810, __pyx_L1_error) } /* "thinc/neural/ops.pyx":809 * dX = mem.alloc(T * O, sizeof(float)) * * cpu_backprop_max_pool(dX, # <<<<<<<<<<<<<< * &d_maxes[0,0], &which[0, 0], &lengths[0], B, T, O) * */ 
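/* Reference sketch (not part of the generated code): the cpu_backprop_max_pool
 * call below routes each pooled gradient back to the row that won the max. A
 * NumPy-level equivalent, assuming `which` stores within-sequence row indices
 * (the convention is fixed by cpu_backprop_max_pool, not shown here) and that
 * the freshly allocated dX buffer starts zeroed:
 *
 *     import numpy as np
 *
 *     def backprop_max_pool_reference(d_maxes, which, lengths):
 *         T = int(np.sum(lengths))            # matches the T += length loop above
 *         O = d_maxes.shape[1]
 *         dX = np.zeros((T, O), dtype='float32')
 *         start = 0
 *         for b, length in enumerate(lengths):
 *             for j in range(O):
 *                 dX[start + which[b, j], j] += d_maxes[b, j]
 *             start += length
 *         return dX
 */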
__pyx_f_5thinc_6neural_3ops_cpu_backprop_max_pool(__pyx_v_dX, (&(*((float const *) ( /* dim=1 */ ((char *) (((float const *) ( /* dim=0 */ (__pyx_v_d_maxes.data + __pyx_t_9 * __pyx_v_d_maxes.strides[0]) )) + __pyx_t_10)) )))), (&(*((int const *) ( /* dim=1 */ ((char *) (((int const *) ( /* dim=0 */ (__pyx_v_which.data + __pyx_t_11 * __pyx_v_which.strides[0]) )) + __pyx_t_12)) )))), (&(*((int const *) ( /* dim=0 */ ((char *) (((int const *) __pyx_v_lengths.data) + __pyx_t_13)) )))), __pyx_v_B, __pyx_v_T, __pyx_v_O); /* "thinc/neural/ops.pyx":812 * &d_maxes[0,0], &which[0, 0], &lengths[0], B, T, O) * * return cpu_floats_ptr2array(dX, (T, O)) # <<<<<<<<<<<<<< * * def add_sum(self, np.ndarray out, np.ndarray to_sum): */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_T); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 812, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_O); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 812, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 812, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_7); __pyx_t_4 = 0; __pyx_t_7 = 0; __pyx_t_7 = __pyx_f_5thinc_6neural_3ops_cpu_floats_ptr2array(__pyx_v_dX, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 812, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":799 * return py_best, py_which * * def backprop_max_pool(self, const float[:, ::1] d_maxes, # <<<<<<<<<<<<<< * const int[:, ::1] which, const int[::1] lengths): * cdef int B = lengths.shape[0] */ /* function exit code */ __pyx_L1_error:; __PYX_XDEC_MEMVIEW(&__pyx_t_1, 1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.backprop_max_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_length); __Pyx_XDECREF((PyObject *)__pyx_v_mem); __PYX_XDEC_MEMVIEW(&__pyx_v_d_maxes, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_which, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_lengths, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":814 * return cpu_floats_ptr2array(dX, (T, O)) * * def add_sum(self, np.ndarray out, np.ndarray to_sum): # <<<<<<<<<<<<<< * VecVec.batch_add_i(out.data, * to_sum.data, 1., to_sum.shape[1], to_sum.shape[0]) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_55add_sum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_55add_sum = {"add_sum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_55add_sum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_55add_sum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyArrayObject *__pyx_v_out = 0; PyArrayObject *__pyx_v_to_sum = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("add_sum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_out,&__pyx_n_s_to_sum,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { 
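/* Pattern note: every `def` method in ops.pyx is generated as a pair of C
 * functions. The __pyx_pw_* wrapper (registered with METH_VARARGS|METH_KEYWORDS)
 * gathers positional and keyword arguments into values[], converts them (ndarray
 * type tests, memoryview slices, float/int coercions) and reports arity or type
 * mismatches through __Pyx_RaiseArgtupleInvalid and __Pyx_AddTraceback; the
 * __pyx_pf_* function it then calls contains the translated method body. This
 * add_sum wrapper follows the same shape as max_pool and backprop_max_pool above. */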
Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add_sum", 1, 3, 3, 1); __PYX_ERR(0, 814, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_to_sum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add_sum", 1, 3, 3, 2); __PYX_ERR(0, 814, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "add_sum") < 0)) __PYX_ERR(0, 814, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_out = ((PyArrayObject *)values[1]); __pyx_v_to_sum = ((PyArrayObject *)values[2]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("add_sum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 814, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.add_sum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_out), __pyx_ptype_5numpy_ndarray, 1, "out", 0))) __PYX_ERR(0, 814, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_to_sum), __pyx_ptype_5numpy_ndarray, 1, "to_sum", 0))) __PYX_ERR(0, 814, __pyx_L1_error) __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_54add_sum(__pyx_self, __pyx_v_self, __pyx_v_out, __pyx_v_to_sum); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_54add_sum(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyArrayObject *__pyx_v_out, PyArrayObject *__pyx_v_to_sum) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__86) __Pyx_RefNannySetupContext("add_sum", 0); __Pyx_TraceCall("add_sum", __pyx_f[0], 814, 0, __PYX_ERR(0, 814, __pyx_L1_error)); /* "thinc/neural/ops.pyx":815 * * def add_sum(self, np.ndarray out, np.ndarray to_sum): * VecVec.batch_add_i(out.data, # <<<<<<<<<<<<<< * to_sum.data, 1., to_sum.shape[1], to_sum.shape[0]) * */ __pyx_f_5thinc_6linalg_6VecVec_batch_add_i(((float *)__pyx_v_out->data), ((float const *)__pyx_v_to_sum->data), 1., (__pyx_v_to_sum->dimensions[1]), (__pyx_v_to_sum->dimensions[0])); /* "thinc/neural/ops.pyx":814 * return cpu_floats_ptr2array(dX, (T, O)) * * def add_sum(self, np.ndarray out, np.ndarray to_sum): # <<<<<<<<<<<<<< * VecVec.batch_add_i(out.data, * to_sum.data, 1., to_sum.shape[1], to_sum.shape[0]) */ 
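/* Reference sketch (not part of the generated code): a NumPy-level reading of the
 * VecVec.batch_add_i call above, assuming it accumulates each of to_sum's rows
 * into out. Because the call casts the raw .data pointers directly, this path
 * relies on both arrays being C-contiguous float32 with out at least
 * to_sum.shape[1] elements wide.
 *
 *     # equivalent effect, under the assumptions above
 *     out += to_sum.sum(axis=0)
 */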
/* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.add_sum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":818 * to_sum.data, 1., to_sum.shape[1], to_sum.shape[0]) * * def scatter_add(self, np.ndarray out, np.ndarray ids, np.ndarray inputs): # <<<<<<<<<<<<<< * if out.dtype == 'float32' \ * and ids.dtype == 'int32' \ */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_57scatter_add(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_57scatter_add = {"scatter_add", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_57scatter_add, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_57scatter_add(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyArrayObject *__pyx_v_out = 0; PyArrayObject *__pyx_v_ids = 0; PyArrayObject *__pyx_v_inputs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("scatter_add (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_out,&__pyx_n_s_ids,&__pyx_n_s_inputs,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("scatter_add", 1, 4, 4, 1); __PYX_ERR(0, 818, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_ids)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("scatter_add", 1, 4, 4, 2); __PYX_ERR(0, 818, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inputs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("scatter_add", 1, 4, 4, 3); __PYX_ERR(0, 818, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "scatter_add") < 0)) __PYX_ERR(0, 818, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_self = values[0]; __pyx_v_out = ((PyArrayObject *)values[1]); __pyx_v_ids = ((PyArrayObject *)values[2]); __pyx_v_inputs = ((PyArrayObject *)values[3]); } goto __pyx_L4_argument_unpacking_done; 
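/* Reference sketch (not part of the generated code): the implementation that
 * follows takes a raw-pointer fast path (cpu_scatter_add) only when out and
 * inputs are C-contiguous float32, ids is a C-contiguous 1-D int32 array, out and
 * inputs are 2-D, inputs.shape[0] == ids.shape[0] and
 * inputs.shape[1] == out.shape[1]; any other combination falls back to
 * self.xp.add.at. The effect in either branch:
 *
 *     # for each row i of inputs: out[ids[i], :] += inputs[i, :]
 *     np.add.at(out, ids, inputs)   # unbuffered, so repeated ids accumulate
 */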
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("scatter_add", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 818, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.scatter_add", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_out), __pyx_ptype_5numpy_ndarray, 1, "out", 0))) __PYX_ERR(0, 818, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_ids), __pyx_ptype_5numpy_ndarray, 1, "ids", 0))) __PYX_ERR(0, 818, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_inputs), __pyx_ptype_5numpy_ndarray, 1, "inputs", 0))) __PYX_ERR(0, 818, __pyx_L1_error) __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_56scatter_add(__pyx_self, __pyx_v_self, __pyx_v_out, __pyx_v_ids, __pyx_v_inputs); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_56scatter_add(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyArrayObject *__pyx_v_out, PyArrayObject *__pyx_v_ids, PyArrayObject *__pyx_v_inputs) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__87) __Pyx_RefNannySetupContext("scatter_add", 0); __Pyx_TraceCall("scatter_add", __pyx_f[0], 818, 0, __PYX_ERR(0, 818, __pyx_L1_error)); /* "thinc/neural/ops.pyx":819 * * def scatter_add(self, np.ndarray out, np.ndarray ids, np.ndarray inputs): * if out.dtype == 'float32' \ # <<<<<<<<<<<<<< * and ids.dtype == 'int32' \ * and inputs.dtype == 'float32' \ */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_out), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 819, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = (__Pyx_PyString_Equals(__pyx_t_2, __pyx_n_s_float32, Py_EQ)); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 819, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } /* "thinc/neural/ops.pyx":820 * def scatter_add(self, np.ndarray out, np.ndarray ids, np.ndarray inputs): * if out.dtype == 'float32' \ * and ids.dtype == 'int32' \ # <<<<<<<<<<<<<< * and inputs.dtype == 'float32' \ * and out.flags.c_contiguous \ */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_ids), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = (__Pyx_PyString_Equals(__pyx_t_2, __pyx_n_s_int32, Py_EQ)); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 820, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } /* "thinc/neural/ops.pyx":821 * if out.dtype == 'float32' \ * and ids.dtype == 'int32' \ * and inputs.dtype == 'float32' \ # <<<<<<<<<<<<<< * and out.flags.c_contiguous \ * and ids.flags.c_contiguous \ */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_inputs), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 821, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = (__Pyx_PyString_Equals(__pyx_t_2, __pyx_n_s_float32, Py_EQ)); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 821, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } /* "thinc/neural/ops.pyx":822 * and ids.dtype == 'int32' \ * and inputs.dtype == 'float32' \ * and out.flags.c_contiguous \ # <<<<<<<<<<<<<< * and ids.flags.c_contiguous \ * and inputs.flags.c_contiguous \ */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_out), __pyx_n_s_flags); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_c_contiguous); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 822, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } /* "thinc/neural/ops.pyx":823 * and inputs.dtype == 'float32' \ * and out.flags.c_contiguous \ * and ids.flags.c_contiguous \ # <<<<<<<<<<<<<< * and inputs.flags.c_contiguous \ * and ids.ndim == 1 \ */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_ids), __pyx_n_s_flags); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_c_contiguous); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 823, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } /* "thinc/neural/ops.pyx":824 * and out.flags.c_contiguous \ * and ids.flags.c_contiguous \ * and inputs.flags.c_contiguous \ # <<<<<<<<<<<<<< * and ids.ndim == 1 \ * and out.ndim == 2 \ */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_inputs), __pyx_n_s_flags); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_c_contiguous); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 824, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } /* "thinc/neural/ops.pyx":825 * and ids.flags.c_contiguous \ * and inputs.flags.c_contiguous \ * and ids.ndim == 1 \ # <<<<<<<<<<<<<< * and out.ndim == 2 \ * and inputs.ndim == 2 \ */ __pyx_t_3 = ((__pyx_v_ids->nd == 1) != 0); if (__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } /* "thinc/neural/ops.pyx":826 * and inputs.flags.c_contiguous \ * and ids.ndim == 1 \ * and out.ndim == 2 \ # <<<<<<<<<<<<<< * and inputs.ndim == 2 \ * and inputs.shape[0] == ids.shape[0] \ */ __pyx_t_3 = ((__pyx_v_out->nd == 2) != 0); if (__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } /* "thinc/neural/ops.pyx":827 * and ids.ndim == 1 \ * and out.ndim == 2 \ * and inputs.ndim == 2 \ # <<<<<<<<<<<<<< * and inputs.shape[0] == ids.shape[0] \ * and inputs.shape[1] == out.shape[1]: */ __pyx_t_3 = ((__pyx_v_inputs->nd == 2) != 0); if (__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } /* "thinc/neural/ops.pyx":828 * and out.ndim == 2 \ * and 
inputs.ndim == 2 \ * and inputs.shape[0] == ids.shape[0] \ # <<<<<<<<<<<<<< * and inputs.shape[1] == out.shape[1]: * cpu_scatter_add(out.data, */ __pyx_t_3 = (((__pyx_v_inputs->dimensions[0]) == (__pyx_v_ids->dimensions[0])) != 0); if (__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } /* "thinc/neural/ops.pyx":829 * and inputs.ndim == 2 \ * and inputs.shape[0] == ids.shape[0] \ * and inputs.shape[1] == out.shape[1]: # <<<<<<<<<<<<<< * cpu_scatter_add(out.data, * ids.data, inputs.data, */ __pyx_t_3 = (((__pyx_v_inputs->dimensions[1]) == (__pyx_v_out->dimensions[1])) != 0); __pyx_t_1 = __pyx_t_3; __pyx_L4_bool_binop_done:; /* "thinc/neural/ops.pyx":819 * * def scatter_add(self, np.ndarray out, np.ndarray ids, np.ndarray inputs): * if out.dtype == 'float32' \ # <<<<<<<<<<<<<< * and ids.dtype == 'int32' \ * and inputs.dtype == 'float32' \ */ if (__pyx_t_1) { /* "thinc/neural/ops.pyx":830 * and inputs.shape[0] == ids.shape[0] \ * and inputs.shape[1] == out.shape[1]: * cpu_scatter_add(out.data, # <<<<<<<<<<<<<< * ids.data, inputs.data, * ids.shape[0], out.shape[1]) */ __pyx_f_5thinc_6neural_3ops_cpu_scatter_add(((float *)__pyx_v_out->data), ((int *)__pyx_v_ids->data), ((float *)__pyx_v_inputs->data), (__pyx_v_ids->dimensions[0]), (__pyx_v_out->dimensions[1])); /* "thinc/neural/ops.pyx":819 * * def scatter_add(self, np.ndarray out, np.ndarray ids, np.ndarray inputs): * if out.dtype == 'float32' \ # <<<<<<<<<<<<<< * and ids.dtype == 'int32' \ * and inputs.dtype == 'float32' \ */ goto __pyx_L3; } /* "thinc/neural/ops.pyx":834 * ids.shape[0], out.shape[1]) * else: * self.xp.add.at(out, ids, inputs) # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ /*else*/ { __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_add); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_at); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; __pyx_t_6 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_6 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[4] = {__pyx_t_5, ((PyObject *)__pyx_v_out), ((PyObject *)__pyx_v_ids), ((PyObject *)__pyx_v_inputs)}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_6, 3+__pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 834, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[4] = {__pyx_t_5, ((PyObject *)__pyx_v_out), ((PyObject *)__pyx_v_ids), ((PyObject *)__pyx_v_inputs)}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_6, 3+__pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 834, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif { __pyx_t_7 = PyTuple_New(3+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_5) { 
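/* Design note on the fallback built here: numpy's ufunc.at performs an
 * unbuffered in-place operation, so rows that appear more than once in ids
 * accumulate every contribution, whereas fancy-indexed `out[ids] += inputs`
 * would apply only one update per duplicated row. A small illustration:
 *
 *     out = np.zeros((2, 3), dtype='float32')
 *     ids = np.array([0, 0], dtype='int32')
 *     out[ids] += 1.0           # row 0 is incremented once (buffered)
 *     np.add.at(out, ids, 1.0)  # row 0 accumulates both contributions
 */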
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(((PyObject *)__pyx_v_out)); __Pyx_GIVEREF(((PyObject *)__pyx_v_out)); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, ((PyObject *)__pyx_v_out)); __Pyx_INCREF(((PyObject *)__pyx_v_ids)); __Pyx_GIVEREF(((PyObject *)__pyx_v_ids)); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, ((PyObject *)__pyx_v_ids)); __Pyx_INCREF(((PyObject *)__pyx_v_inputs)); __Pyx_GIVEREF(((PyObject *)__pyx_v_inputs)); PyTuple_SET_ITEM(__pyx_t_7, 2+__pyx_t_6, ((PyObject *)__pyx_v_inputs)); __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_7, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "thinc/neural/ops.pyx":818 * to_sum.data, 1., to_sum.shape[1], to_sum.shape[0]) * * def scatter_add(self, np.ndarray out, np.ndarray ids, np.ndarray inputs): # <<<<<<<<<<<<<< * if out.dtype == 'float32' \ * and ids.dtype == 'int32' \ */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.scatter_add", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":838 * @cython.boundscheck(False) * @cython.wraparound(False) * def adam(self, float[::1] weights, float[::1] gradient, float[::1] mom1, # <<<<<<<<<<<<<< * float[::1] mom2, const float beta1, const float beta2, float eps, * float learn_rate, float mod_rate=1.): */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_59adam(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_59adam = {"adam", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_59adam, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_59adam(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; __Pyx_memviewslice __pyx_v_weights = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_gradient = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_mom1 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_mom2 = { 0, 0, { 0 }, { 0 }, { 0 } }; float __pyx_v_beta1; float __pyx_v_beta2; float __pyx_v_eps; float __pyx_v_learn_rate; CYTHON_UNUSED float __pyx_v_mod_rate; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("adam (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_weights,&__pyx_n_s_gradient,&__pyx_n_s_mom1,&__pyx_n_s_mom2,&__pyx_n_s_beta1,&__pyx_n_s_beta2,&__pyx_n_s_eps,&__pyx_n_s_learn_rate,&__pyx_n_s_mod_rate,0}; PyObject* values[10] = {0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); CYTHON_FALLTHROUGH; case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); CYTHON_FALLTHROUGH; case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = 
PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 1); __PYX_ERR(0, 838, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 2); __PYX_ERR(0, 838, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mom1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 3); __PYX_ERR(0, 838, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mom2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 4); __PYX_ERR(0, 838, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_beta1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 5); __PYX_ERR(0, 838, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 6: if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_beta2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 6); __PYX_ERR(0, 838, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 7: if (likely((values[7] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_eps)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 7); __PYX_ERR(0, 838, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 8: if (likely((values[8] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_learn_rate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 8); __PYX_ERR(0, 838, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 9: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mod_rate); if (value) { values[9] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "adam") < 0)) __PYX_ERR(0, 838, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); CYTHON_FALLTHROUGH; case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_weights = __Pyx_PyObject_to_MemoryviewSlice_dc_float(values[1], PyBUF_WRITABLE); if 
(unlikely(!__pyx_v_weights.memview)) __PYX_ERR(0, 838, __pyx_L3_error) __pyx_v_gradient = __Pyx_PyObject_to_MemoryviewSlice_dc_float(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_gradient.memview)) __PYX_ERR(0, 838, __pyx_L3_error) __pyx_v_mom1 = __Pyx_PyObject_to_MemoryviewSlice_dc_float(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_mom1.memview)) __PYX_ERR(0, 838, __pyx_L3_error) __pyx_v_mom2 = __Pyx_PyObject_to_MemoryviewSlice_dc_float(values[4], PyBUF_WRITABLE); if (unlikely(!__pyx_v_mom2.memview)) __PYX_ERR(0, 839, __pyx_L3_error) __pyx_v_beta1 = __pyx_PyFloat_AsFloat(values[5]); if (unlikely((__pyx_v_beta1 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 839, __pyx_L3_error) __pyx_v_beta2 = __pyx_PyFloat_AsFloat(values[6]); if (unlikely((__pyx_v_beta2 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 839, __pyx_L3_error) __pyx_v_eps = __pyx_PyFloat_AsFloat(values[7]); if (unlikely((__pyx_v_eps == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 839, __pyx_L3_error) __pyx_v_learn_rate = __pyx_PyFloat_AsFloat(values[8]); if (unlikely((__pyx_v_learn_rate == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 840, __pyx_L3_error) if (values[9]) { __pyx_v_mod_rate = __pyx_PyFloat_AsFloat(values[9]); if (unlikely((__pyx_v_mod_rate == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 840, __pyx_L3_error) } else { __pyx_v_mod_rate = ((float)((double)1.)); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 838, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.adam", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_58adam(__pyx_self, __pyx_v_self, __pyx_v_weights, __pyx_v_gradient, __pyx_v_mom1, __pyx_v_mom2, __pyx_v_beta1, __pyx_v_beta2, __pyx_v_eps, __pyx_v_learn_rate, __pyx_v_mod_rate); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_58adam(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, __Pyx_memviewslice __pyx_v_weights, __Pyx_memviewslice __pyx_v_gradient, __Pyx_memviewslice __pyx_v_mom1, __Pyx_memviewslice __pyx_v_mom2, float __pyx_v_beta1, float __pyx_v_beta2, float __pyx_v_eps, float __pyx_v_learn_rate, CYTHON_UNUSED float __pyx_v_mod_rate) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; size_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__88) __Pyx_RefNannySetupContext("adam", 0); __Pyx_TraceCall("adam", __pyx_f[0], 838, 0, __PYX_ERR(0, 838, __pyx_L1_error)); /* "thinc/neural/ops.pyx":841 * float[::1] mom2, const float beta1, const float beta2, float eps, * float learn_rate, float mod_rate=1.): * _adam_momentum(&gradient[0], &mom1[0], &mom2[0], # <<<<<<<<<<<<<< * weights.shape[0], beta1, beta2, eps, learn_rate) * VecVec.add_i(&weights[0], */ __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":842 * float learn_rate, float mod_rate=1.): * _adam_momentum(&gradient[0], &mom1[0], &mom2[0], * weights.shape[0], beta1, beta2, eps, learn_rate) # <<<<<<<<<<<<<< * VecVec.add_i(&weights[0], * &gradient[0], -learn_rate, weights.shape[0]) */ 
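/* Reference sketch (not part of the generated code): the calls below apply an
 * Adam-style update in place. The exact arithmetic, including any bias
 * correction and how learn_rate and the (unused in this build) mod_rate
 * parameter enter, lives in _adam_momentum and is not visible in this section;
 * the sketch shows one common formulation consistent with the visible
 * VecVec.add_i and memset calls.
 *
 *     mom1[:] = beta1 * mom1 + (1 - beta1) * gradient
 *     mom2[:] = beta2 * mom2 + (1 - beta2) * gradient ** 2
 *     gradient[:] = mom1 / (np.sqrt(mom2) + eps)    # assumed _adam_momentum effect
 *     weights += -learn_rate * gradient             # VecVec.add_i(..., -learn_rate, ...)
 *     gradient[:] = 0                               # memset(&gradient[0], 0, ...)
 */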
__pyx_f_5thinc_6neural_3ops__adam_momentum((&(*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_gradient.data) + __pyx_t_1)) )))), (&(*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_mom1.data) + __pyx_t_2)) )))), (&(*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_mom2.data) + __pyx_t_3)) )))), (__pyx_v_weights.shape[0]), __pyx_v_beta1, __pyx_v_beta2, __pyx_v_eps, __pyx_v_learn_rate); /* "thinc/neural/ops.pyx":843 * _adam_momentum(&gradient[0], &mom1[0], &mom2[0], * weights.shape[0], beta1, beta2, eps, learn_rate) * VecVec.add_i(&weights[0], # <<<<<<<<<<<<<< * &gradient[0], -learn_rate, weights.shape[0]) * memset(&gradient[0], 0, gradient.size * sizeof(float)) */ __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":844 * weights.shape[0], beta1, beta2, eps, learn_rate) * VecVec.add_i(&weights[0], * &gradient[0], -learn_rate, weights.shape[0]) # <<<<<<<<<<<<<< * memset(&gradient[0], 0, gradient.size * sizeof(float)) * */ __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":843 * _adam_momentum(&gradient[0], &mom1[0], &mom2[0], * weights.shape[0], beta1, beta2, eps, learn_rate) * VecVec.add_i(&weights[0], # <<<<<<<<<<<<<< * &gradient[0], -learn_rate, weights.shape[0]) * memset(&gradient[0], 0, gradient.size * sizeof(float)) */ __pyx_f_5thinc_6linalg_6VecVec_add_i((&(*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_weights.data) + __pyx_t_3)) )))), (&(*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_gradient.data) + __pyx_t_2)) )))), (-__pyx_v_learn_rate), (__pyx_v_weights.shape[0])); /* "thinc/neural/ops.pyx":845 * VecVec.add_i(&weights[0], * &gradient[0], -learn_rate, weights.shape[0]) * memset(&gradient[0], 0, gradient.size * sizeof(float)) # <<<<<<<<<<<<<< * * def ngrams(self, int n, const uint64_t[::1] keys_): */ __pyx_t_2 = 0; __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_v_gradient, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 845, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 845, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_FromSize_t((sizeof(float))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 845, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Multiply(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 845, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_7 = __Pyx_PyInt_As_size_t(__pyx_t_6); if (unlikely((__pyx_t_7 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 845, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; (void)(memset((&(*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_gradient.data) + __pyx_t_2)) )))), 0, __pyx_t_7)); /* "thinc/neural/ops.pyx":838 * @cython.boundscheck(False) * @cython.wraparound(False) * def adam(self, float[::1] weights, float[::1] gradient, float[::1] mom1, # <<<<<<<<<<<<<< * float[::1] mom2, const float beta1, const float beta2, float eps, * float learn_rate, float mod_rate=1.): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.adam", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_weights, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_gradient, 1); 
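/* Reference sketch (not part of the generated code): the ngrams method generated
 * a few lines below (ops.pyx:847-854) hashes every run of n consecutive uint64
 * keys with murmurhash's 64-bit hash over the raw n*sizeof(uint64_t) bytes, seed
 * 0. Pseudo-code, where murmur64(data, seed) is a hypothetical stand-in for that
 * hash:
 *
 *     def ngrams_reference(n, keys):                  # keys: 1-D uint64 array
 *         length = max(0, keys.shape[0] - n)
 *         output = np.zeros((length,), dtype='uint64')
 *         for i in range(length):
 *             output[i] = murmur64(keys[i:i + n].tobytes(), seed=0)
 *         return output
 */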
__PYX_XDEC_MEMVIEW(&__pyx_v_mom1, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_mom2, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":847 * memset(&gradient[0], 0, gradient.size * sizeof(float)) * * def ngrams(self, int n, const uint64_t[::1] keys_): # <<<<<<<<<<<<<< * keys = &keys_[0] * length = max(0, keys_.shape[0]-n) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_61ngrams(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_61ngrams = {"ngrams", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_61ngrams, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_61ngrams(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; int __pyx_v_n; __Pyx_memviewslice __pyx_v_keys_ = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("ngrams (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_n,&__pyx_n_s_keys,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_n)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("ngrams", 1, 3, 3, 1); __PYX_ERR(0, 847, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_keys)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("ngrams", 1, 3, 3, 2); __PYX_ERR(0, 847, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "ngrams") < 0)) __PYX_ERR(0, 847, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_n = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_n == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 847, __pyx_L3_error) __pyx_v_keys_ = __Pyx_PyObject_to_MemoryviewSlice_dc_nn_uint64_t__const__(values[2], 0); if (unlikely(!__pyx_v_keys_.memview)) __PYX_ERR(0, 847, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("ngrams", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 847, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.ngrams", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_60ngrams(__pyx_self, __pyx_v_self, __pyx_v_n, __pyx_v_keys_); /* 
function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_60ngrams(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, int __pyx_v_n, __Pyx_memviewslice __pyx_v_keys_) { uint64_t *__pyx_v_keys; Py_ssize_t __pyx_v_length; PyArrayObject *__pyx_v_output_ = 0; uint64_t *__pyx_v_output; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; Py_ssize_t __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__89) __Pyx_RefNannySetupContext("ngrams", 0); __Pyx_TraceCall("ngrams", __pyx_f[0], 847, 0, __PYX_ERR(0, 847, __pyx_L1_error)); /* "thinc/neural/ops.pyx":848 * * def ngrams(self, int n, const uint64_t[::1] keys_): * keys = &keys_[0] # <<<<<<<<<<<<<< * length = max(0, keys_.shape[0]-n) * cdef np.ndarray output_ = self.allocate((length,), dtype='uint64') */ __pyx_t_1 = 0; __pyx_t_2 = -1; if (__pyx_t_1 < 0) { __pyx_t_1 += __pyx_v_keys_.shape[0]; if (unlikely(__pyx_t_1 < 0)) __pyx_t_2 = 0; } else if (unlikely(__pyx_t_1 >= __pyx_v_keys_.shape[0])) __pyx_t_2 = 0; if (unlikely(__pyx_t_2 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_2); __PYX_ERR(0, 848, __pyx_L1_error) } __pyx_v_keys = ((uint64_t *)(&(*((uint64_t const *) ( /* dim=0 */ ((char *) (((uint64_t const *) __pyx_v_keys_.data) + __pyx_t_1)) ))))); /* "thinc/neural/ops.pyx":849 * def ngrams(self, int n, const uint64_t[::1] keys_): * keys = &keys_[0] * length = max(0, keys_.shape[0]-n) # <<<<<<<<<<<<<< * cdef np.ndarray output_ = self.allocate((length,), dtype='uint64') * output = output_.data */ __pyx_t_3 = ((__pyx_v_keys_.shape[0]) - __pyx_v_n); __pyx_t_4 = 0; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_length = __pyx_t_5; /* "thinc/neural/ops.pyx":850 * keys = &keys_[0] * length = max(0, keys_.shape[0]-n) * cdef np.ndarray output_ = self.allocate((length,), dtype='uint64') # <<<<<<<<<<<<<< * output = output_.data * for i in range(keys_.shape[0]-n): */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 850, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 850, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 850, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 850, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 850, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (PyDict_SetItem(__pyx_t_8, __pyx_n_s_dtype, __pyx_n_s_uint64) < 0) __PYX_ERR(0, 850, __pyx_L1_error) __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, __pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 850, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, 
__pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 850, __pyx_L1_error) __pyx_v_output_ = ((PyArrayObject *)__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":851 * length = max(0, keys_.shape[0]-n) * cdef np.ndarray output_ = self.allocate((length,), dtype='uint64') * output = output_.data # <<<<<<<<<<<<<< * for i in range(keys_.shape[0]-n): * output[i] = hash64(&keys[i], n*sizeof(keys[0]), 0) */ __pyx_v_output = ((uint64_t *)__pyx_v_output_->data); /* "thinc/neural/ops.pyx":852 * cdef np.ndarray output_ = self.allocate((length,), dtype='uint64') * output = output_.data * for i in range(keys_.shape[0]-n): # <<<<<<<<<<<<<< * output[i] = hash64(&keys[i], n*sizeof(keys[0]), 0) * return output_ */ __pyx_t_5 = ((__pyx_v_keys_.shape[0]) - __pyx_v_n); __pyx_t_3 = __pyx_t_5; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_3; __pyx_t_10+=1) { __pyx_v_i = __pyx_t_10; /* "thinc/neural/ops.pyx":853 * output = output_.data * for i in range(keys_.shape[0]-n): * output[i] = hash64(&keys[i], n*sizeof(keys[0]), 0) # <<<<<<<<<<<<<< * return output_ * */ (__pyx_v_output[__pyx_v_i]) = __pyx_f_10murmurhash_4mrmr_hash64((&(__pyx_v_keys[__pyx_v_i])), (__pyx_v_n * (sizeof((__pyx_v_keys[0])))), 0); } /* "thinc/neural/ops.pyx":854 * for i in range(keys_.shape[0]-n): * output[i] = hash64(&keys[i], n*sizeof(keys[0]), 0) * return output_ # <<<<<<<<<<<<<< * * def position_encode(self, int N, int D, int period=10000, out=None): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_output_)); __pyx_r = ((PyObject *)__pyx_v_output_); goto __pyx_L0; /* "thinc/neural/ops.pyx":847 * memset(&gradient[0], 0, gradient.size * sizeof(float)) * * def ngrams(self, int n, const uint64_t[::1] keys_): # <<<<<<<<<<<<<< * keys = &keys_[0] * length = max(0, keys_.shape[0]-n) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.ngrams", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_output_); __PYX_XDEC_MEMVIEW(&__pyx_v_keys_, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":856 * return output_ * * def position_encode(self, int N, int D, int period=10000, out=None): # <<<<<<<<<<<<<< * cdef np.ndarray out_ * if out is None: */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_63position_encode(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_8NumpyOps_63position_encode = {"position_encode", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_8NumpyOps_63position_encode, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_8NumpyOps_63position_encode(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; int __pyx_v_N; int __pyx_v_D; int __pyx_v_period; PyObject *__pyx_v_out = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("position_encode (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_N,&__pyx_n_s_D,&__pyx_n_s_period,&__pyx_n_s_out,0}; PyObject* values[5] = {0,0,0,0,0}; values[4] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = 
PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_N)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("position_encode", 0, 3, 5, 1); __PYX_ERR(0, 856, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_D)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("position_encode", 0, 3, 5, 2); __PYX_ERR(0, 856, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_period); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "position_encode") < 0)) __PYX_ERR(0, 856, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_N = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 856, __pyx_L3_error) __pyx_v_D = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_D == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 856, __pyx_L3_error) if (values[3]) { __pyx_v_period = __Pyx_PyInt_As_int(values[3]); if (unlikely((__pyx_v_period == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 856, __pyx_L3_error) } else { __pyx_v_period = ((int)((int)0x2710)); } __pyx_v_out = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("position_encode", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 856, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.position_encode", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_8NumpyOps_62position_encode(__pyx_self, __pyx_v_self, __pyx_v_N, __pyx_v_D, __pyx_v_period, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_8NumpyOps_62position_encode(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, int __pyx_v_N, int __pyx_v_D, int __pyx_v_period, PyObject *__pyx_v_out) { PyArrayObject *__pyx_v_out_ = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = 
NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__90) __Pyx_RefNannySetupContext("position_encode", 0); __Pyx_TraceCall("position_encode", __pyx_f[0], 856, 0, __PYX_ERR(0, 856, __pyx_L1_error)); /* "thinc/neural/ops.pyx":858 * def position_encode(self, int N, int D, int period=10000, out=None): * cdef np.ndarray out_ * if out is None: # <<<<<<<<<<<<<< * out_ = self.allocate((N, D)) * else: */ __pyx_t_1 = (__pyx_v_out == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "thinc/neural/ops.pyx":859 * cdef np.ndarray out_ * if out is None: * out_ = self.allocate((N, D)) # <<<<<<<<<<<<<< * else: * out_ = out */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_allocate); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 859, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_N); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 859, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_D); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 859, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 859, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_3 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_6, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_7); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 859, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 859, __pyx_L1_error) __pyx_v_out_ = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":858 * def position_encode(self, int N, int D, int period=10000, out=None): * cdef np.ndarray out_ * if out is None: # <<<<<<<<<<<<<< * out_ = self.allocate((N, D)) * else: */ goto __pyx_L3; } /* "thinc/neural/ops.pyx":861 * out_ = self.allocate((N, D)) * else: * out_ = out # <<<<<<<<<<<<<< * assert out_.shape[0] == N * assert out_.shape[1] == D */ /*else*/ { if (!(likely(((__pyx_v_out) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_out, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 861, __pyx_L1_error) __pyx_t_3 = __pyx_v_out; __Pyx_INCREF(__pyx_t_3); __pyx_v_out_ = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "thinc/neural/ops.pyx":862 * else: * out_ = out * assert out_.shape[0] == N # <<<<<<<<<<<<<< * assert out_.shape[1] == D * cpu_position_encode(out_.data, period, N, D) */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((__pyx_v_out_->dimensions[0]) == __pyx_v_N) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 862, __pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":863 * out_ = out * assert out_.shape[0] == N * assert out_.shape[1] == D # <<<<<<<<<<<<<< * cpu_position_encode(out_.data, period, N, D) * return out_ */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((__pyx_v_out_->dimensions[1]) == __pyx_v_D) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 863, __pyx_L1_error) } } #endif /* "thinc/neural/ops.pyx":864 * assert out_.shape[0] == N * assert out_.shape[1] == D * cpu_position_encode(out_.data, period, N, D) # <<<<<<<<<<<<<< * return out_ * */ __pyx_f_5thinc_6neural_3ops_cpu_position_encode(((float *)__pyx_v_out_->data), __pyx_v_period, __pyx_v_N, __pyx_v_D); /* "thinc/neural/ops.pyx":865 * assert out_.shape[1] == D * cpu_position_encode(out_.data, period, N, D) * return out_ # <<<<<<<<<<<<<< * * cdef void cpu_position_encode(float* output, float period, int N, int D) nogil: */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_out_)); __pyx_r = ((PyObject *)__pyx_v_out_); goto __pyx_L0; /* "thinc/neural/ops.pyx":856 * return output_ * * def position_encode(self, int N, int D, int period=10000, out=None): # <<<<<<<<<<<<<< * cdef np.ndarray out_ * if out is None: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.NumpyOps.position_encode", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_out_); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":867 * return out_ * * cdef void cpu_position_encode(float* output, float period, int N, int D) nogil: # <<<<<<<<<<<<<< * cdef float pos, d * cdef int j */ static void __pyx_f_5thinc_6neural_3ops_cpu_position_encode(float *__pyx_v_output, float __pyx_v_period, int __pyx_v_N, int __pyx_v_D) { float 
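/*
 * For reference: cpu_position_encode, whose definition starts here, writes a row of D
 * floats for every position pos in [0, N). In the loop below, d is the even column
 * index j, so each sin/cos pair is
 *
 *     out[pos*D + j]     = sin(pos / period^(2j/D))
 *     out[pos*D + j + 1] = cos(pos / period^(2j/D))      for even j with j + 1 < D,
 *
 * and when D is odd the trailing column reuses the exponent of the last full pair.
 * Worked example with period = 10000, D = 4, pos = 1: the row is
 * [sin(1), cos(1), sin(1/10000), cos(1/10000)].
 */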
__pyx_v_pos; float __pyx_v_d; int __pyx_v_j; float __pyx_v_dimensions; int __pyx_v_i; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_position_encode", __pyx_f[0], 867, 1, __PYX_ERR(0, 867, __pyx_L1_error)); /* "thinc/neural/ops.pyx":870 * cdef float pos, d * cdef int j * cdef float dimensions = D # <<<<<<<<<<<<<< * for i in range(N): * pos = i */ __pyx_v_dimensions = __pyx_v_D; /* "thinc/neural/ops.pyx":871 * cdef int j * cdef float dimensions = D * for i in range(N): # <<<<<<<<<<<<<< * pos = i * j = 0 */ __pyx_t_1 = __pyx_v_N; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "thinc/neural/ops.pyx":872 * cdef float dimensions = D * for i in range(N): * pos = i # <<<<<<<<<<<<<< * j = 0 * d = 0 */ __pyx_v_pos = __pyx_v_i; /* "thinc/neural/ops.pyx":873 * for i in range(N): * pos = i * j = 0 # <<<<<<<<<<<<<< * d = 0 * while (j+1) < D: */ __pyx_v_j = 0; /* "thinc/neural/ops.pyx":874 * pos = i * j = 0 * d = 0 # <<<<<<<<<<<<<< * while (j+1) < D: * d = j */ __pyx_v_d = 0.0; /* "thinc/neural/ops.pyx":875 * j = 0 * d = 0 * while (j+1) < D: # <<<<<<<<<<<<<< * d = j * output[j] = sinf(pos / period ** (2 * d / dimensions)) */ while (1) { __pyx_t_4 = (((__pyx_v_j + 1) < __pyx_v_D) != 0); if (!__pyx_t_4) break; /* "thinc/neural/ops.pyx":876 * d = 0 * while (j+1) < D: * d = j # <<<<<<<<<<<<<< * output[j] = sinf(pos / period ** (2 * d / dimensions)) * output[j+1] = cosf(pos / period ** (2 * d / dimensions)) */ __pyx_v_d = __pyx_v_j; /* "thinc/neural/ops.pyx":877 * while (j+1) < D: * d = j * output[j] = sinf(pos / period ** (2 * d / dimensions)) # <<<<<<<<<<<<<< * output[j+1] = cosf(pos / period ** (2 * d / dimensions)) * j += 2 */ (__pyx_v_output[__pyx_v_j]) = sinf((__pyx_v_pos / powf(__pyx_v_period, ((2.0 * __pyx_v_d) / __pyx_v_dimensions)))); /* "thinc/neural/ops.pyx":878 * d = j * output[j] = sinf(pos / period ** (2 * d / dimensions)) * output[j+1] = cosf(pos / period ** (2 * d / dimensions)) # <<<<<<<<<<<<<< * j += 2 * if j < D: */ (__pyx_v_output[(__pyx_v_j + 1)]) = cosf((__pyx_v_pos / powf(__pyx_v_period, ((2.0 * __pyx_v_d) / __pyx_v_dimensions)))); /* "thinc/neural/ops.pyx":879 * output[j] = sinf(pos / period ** (2 * d / dimensions)) * output[j+1] = cosf(pos / period ** (2 * d / dimensions)) * j += 2 # <<<<<<<<<<<<<< * if j < D: * output[j] = sinf(pos / period ** (2 * d / dimensions)) */ __pyx_v_j = (__pyx_v_j + 2); } /* "thinc/neural/ops.pyx":880 * output[j+1] = cosf(pos / period ** (2 * d / dimensions)) * j += 2 * if j < D: # <<<<<<<<<<<<<< * output[j] = sinf(pos / period ** (2 * d / dimensions)) * output += D */ __pyx_t_4 = ((__pyx_v_j < __pyx_v_D) != 0); if (__pyx_t_4) { /* "thinc/neural/ops.pyx":881 * j += 2 * if j < D: * output[j] = sinf(pos / period ** (2 * d / dimensions)) # <<<<<<<<<<<<<< * output += D * */ (__pyx_v_output[__pyx_v_j]) = sinf((__pyx_v_pos / powf(__pyx_v_period, ((2.0 * __pyx_v_d) / __pyx_v_dimensions)))); /* "thinc/neural/ops.pyx":880 * output[j+1] = cosf(pos / period ** (2 * d / dimensions)) * j += 2 * if j < D: # <<<<<<<<<<<<<< * output[j] = sinf(pos / period ** (2 * d / dimensions)) * output += D */ } /* "thinc/neural/ops.pyx":882 * if j < D: * output[j] = sinf(pos / period ** (2 * d / dimensions)) * output += D # <<<<<<<<<<<<<< * * */ __pyx_v_output = (__pyx_v_output + __pyx_v_D); } /* "thinc/neural/ops.pyx":867 * return out_ * * cdef void cpu_position_encode(float* output, 
float period, int N, int D) nogil: # <<<<<<<<<<<<<< * cdef float pos, d * cdef int j */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_position_encode", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":885 * * * cdef void cpu_scatter_add(float* dest, # <<<<<<<<<<<<<< * const int* indices, const float* src, * int nr_id, int nr_col) nogil: */ static void __pyx_f_5thinc_6neural_3ops_cpu_scatter_add(float *__pyx_v_dest, int const *__pyx_v_indices, float const *__pyx_v_src, int __pyx_v_nr_id, int __pyx_v_nr_col) { int __pyx_v_i; int __pyx_v_id_; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_scatter_add", __pyx_f[0], 885, 1, __PYX_ERR(0, 885, __pyx_L1_error)); /* "thinc/neural/ops.pyx":889 * int nr_id, int nr_col) nogil: * cdef int i * for i in range(nr_id): # <<<<<<<<<<<<<< * id_ = indices[i] * if id_ >= 0: */ __pyx_t_1 = __pyx_v_nr_id; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "thinc/neural/ops.pyx":890 * cdef int i * for i in range(nr_id): * id_ = indices[i] # <<<<<<<<<<<<<< * if id_ >= 0: * VecVec.add_i(&dest[id_*nr_col], */ __pyx_v_id_ = (__pyx_v_indices[__pyx_v_i]); /* "thinc/neural/ops.pyx":891 * for i in range(nr_id): * id_ = indices[i] * if id_ >= 0: # <<<<<<<<<<<<<< * VecVec.add_i(&dest[id_*nr_col], * &src[i*nr_col], 1., nr_col) */ __pyx_t_4 = ((__pyx_v_id_ >= 0) != 0); if (__pyx_t_4) { /* "thinc/neural/ops.pyx":892 * id_ = indices[i] * if id_ >= 0: * VecVec.add_i(&dest[id_*nr_col], # <<<<<<<<<<<<<< * &src[i*nr_col], 1., nr_col) * */ __pyx_f_5thinc_6linalg_6VecVec_add_i((&(__pyx_v_dest[(__pyx_v_id_ * __pyx_v_nr_col)])), (&(__pyx_v_src[(__pyx_v_i * __pyx_v_nr_col)])), 1., __pyx_v_nr_col); /* "thinc/neural/ops.pyx":891 * for i in range(nr_id): * id_ = indices[i] * if id_ >= 0: # <<<<<<<<<<<<<< * VecVec.add_i(&dest[id_*nr_col], * &src[i*nr_col], 1., nr_col) */ } } /* "thinc/neural/ops.pyx":885 * * * cdef void cpu_scatter_add(float* dest, # <<<<<<<<<<<<<< * const int* indices, const float* src, * int nr_id, int nr_col) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_scatter_add", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":897 * * @cython.cdivision(True) * cdef void _adam_momentum(weight_t* gradient, weight_t* mom1, weight_t* mom2, # <<<<<<<<<<<<<< * int nr_weight, weight_t beta1, weight_t beta2, weight_t eps, * weight_t learn_rate) nogil: */ static void __pyx_f_5thinc_6neural_3ops__adam_momentum(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_gradient, __pyx_t_5thinc_8typedefs_weight_t *__pyx_v_mom1, __pyx_t_5thinc_8typedefs_weight_t *__pyx_v_mom2, int __pyx_v_nr_weight, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_beta1, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_beta2, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_eps, CYTHON_UNUSED __pyx_t_5thinc_8typedefs_weight_t __pyx_v_learn_rate) { __pyx_t_5thinc_8typedefs_weight_t __pyx_v_one_minus_beta1; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_one_minus_beta2; CYTHON_UNUSED int __pyx_v_i; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_buff[64]; int __pyx_v_steps; long __pyx_v_idx; long __pyx_v_step_size; long __pyx_v_j; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int 
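/*
 * For reference: cpu_scatter_add above computes dest[indices[i], :] += src[i, :] for
 * each of the nr_id source rows, skipping negative indices, via VecVec.add_i with a
 * scale of 1.0. A plain-C sketch of the same operation without the thinc.linalg
 * helpers (the helper name scatter_add_rows is illustrative only):
 *
 *     #include <stddef.h>
 *
 *     static void scatter_add_rows(float* dest, const int* indices, const float* src,
 *                                  int nr_id, int nr_col) {
 *         for (int i = 0; i < nr_id; ++i) {
 *             int id = indices[i];
 *             if (id < 0)
 *                 continue;  // skip negative indices, as above
 *             for (int c = 0; c < nr_col; ++c)
 *                 dest[(size_t)id * nr_col + c] += src[(size_t)i * nr_col + c];
 *         }
 *     }
 */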
__pyx_t_4; long __pyx_t_5; long __pyx_t_6; long __pyx_t_7; long __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("_adam_momentum", __pyx_f[0], 897, 1, __PYX_ERR(0, 897, __pyx_L1_error)); /* "thinc/neural/ops.pyx":903 * # Assumes the learning rate adustment is calculated by the caller; * # a_t = learn_rate * sqrt(1-beta2**timestep) / (1-beta1**timestep) * cdef weight_t one_minus_beta1 = 1-beta1 # <<<<<<<<<<<<<< * cdef weight_t one_minus_beta2 = 1-beta2 * cdef weight_t m1, m2, g */ __pyx_v_one_minus_beta1 = (1.0 - __pyx_v_beta1); /* "thinc/neural/ops.pyx":904 * # a_t = learn_rate * sqrt(1-beta2**timestep) / (1-beta1**timestep) * cdef weight_t one_minus_beta1 = 1-beta1 * cdef weight_t one_minus_beta2 = 1-beta2 # <<<<<<<<<<<<<< * cdef weight_t m1, m2, g * cdef int i */ __pyx_v_one_minus_beta2 = (1.0 - __pyx_v_beta2); /* "thinc/neural/ops.pyx":909 * # Blockwise implementation is a bit faster. Adam is slooow :( * cdef weight_t[64] buff * cdef int steps = nr_weight // 64 # <<<<<<<<<<<<<< * if steps * 64 < nr_weight: * steps += 1 */ __pyx_v_steps = (__pyx_v_nr_weight / 64); /* "thinc/neural/ops.pyx":910 * cdef weight_t[64] buff * cdef int steps = nr_weight // 64 * if steps * 64 < nr_weight: # <<<<<<<<<<<<<< * steps += 1 * idx = 0 */ __pyx_t_1 = (((__pyx_v_steps * 64) < __pyx_v_nr_weight) != 0); if (__pyx_t_1) { /* "thinc/neural/ops.pyx":911 * cdef int steps = nr_weight // 64 * if steps * 64 < nr_weight: * steps += 1 # <<<<<<<<<<<<<< * idx = 0 * for i in range(steps): */ __pyx_v_steps = (__pyx_v_steps + 1); /* "thinc/neural/ops.pyx":910 * cdef weight_t[64] buff * cdef int steps = nr_weight // 64 * if steps * 64 < nr_weight: # <<<<<<<<<<<<<< * steps += 1 * idx = 0 */ } /* "thinc/neural/ops.pyx":912 * if steps * 64 < nr_weight: * steps += 1 * idx = 0 # <<<<<<<<<<<<<< * for i in range(steps): * step_size = min(64, nr_weight-idx) */ __pyx_v_idx = 0; /* "thinc/neural/ops.pyx":913 * steps += 1 * idx = 0 * for i in range(steps): # <<<<<<<<<<<<<< * step_size = min(64, nr_weight-idx) * Vec.mul_i(mom1, beta1, step_size) */ __pyx_t_2 = __pyx_v_steps; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "thinc/neural/ops.pyx":914 * idx = 0 * for i in range(steps): * step_size = min(64, nr_weight-idx) # <<<<<<<<<<<<<< * Vec.mul_i(mom1, beta1, step_size) * VecVec.add_i(mom1, gradient, one_minus_beta1, step_size) */ __pyx_t_5 = (__pyx_v_nr_weight - __pyx_v_idx); __pyx_t_6 = 64; if (((__pyx_t_5 < __pyx_t_6) != 0)) { __pyx_t_7 = __pyx_t_5; } else { __pyx_t_7 = __pyx_t_6; } __pyx_v_step_size = __pyx_t_7; /* "thinc/neural/ops.pyx":915 * for i in range(steps): * step_size = min(64, nr_weight-idx) * Vec.mul_i(mom1, beta1, step_size) # <<<<<<<<<<<<<< * VecVec.add_i(mom1, gradient, one_minus_beta1, step_size) * Vec.mul_i(mom2, beta2, step_size) */ __pyx_f_5thinc_6linalg_3Vec_mul_i(__pyx_v_mom1, __pyx_v_beta1, __pyx_v_step_size); /* "thinc/neural/ops.pyx":916 * step_size = min(64, nr_weight-idx) * Vec.mul_i(mom1, beta1, step_size) * VecVec.add_i(mom1, gradient, one_minus_beta1, step_size) # <<<<<<<<<<<<<< * Vec.mul_i(mom2, beta2, step_size) * for j in range(step_size): */ __pyx_f_5thinc_6linalg_6VecVec_add_i(__pyx_v_mom1, __pyx_v_gradient, __pyx_v_one_minus_beta1, __pyx_v_step_size); /* "thinc/neural/ops.pyx":917 * Vec.mul_i(mom1, beta1, step_size) * VecVec.add_i(mom1, gradient, one_minus_beta1, step_size) * Vec.mul_i(mom2, beta2, step_size) # <<<<<<<<<<<<<< * for j in range(step_size): * mom2[j] += 
one_minus_beta2 * gradient[j] ** 2 */ __pyx_f_5thinc_6linalg_3Vec_mul_i(__pyx_v_mom2, __pyx_v_beta2, __pyx_v_step_size); /* "thinc/neural/ops.pyx":918 * VecVec.add_i(mom1, gradient, one_minus_beta1, step_size) * Vec.mul_i(mom2, beta2, step_size) * for j in range(step_size): # <<<<<<<<<<<<<< * mom2[j] += one_minus_beta2 * gradient[j] ** 2 * for j in range(step_size): */ __pyx_t_7 = __pyx_v_step_size; __pyx_t_5 = __pyx_t_7; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_j = __pyx_t_6; /* "thinc/neural/ops.pyx":919 * Vec.mul_i(mom2, beta2, step_size) * for j in range(step_size): * mom2[j] += one_minus_beta2 * gradient[j] ** 2 # <<<<<<<<<<<<<< * for j in range(step_size): * buff[j] = sqrtf(mom2[j]) */ __pyx_t_8 = __pyx_v_j; (__pyx_v_mom2[__pyx_t_8]) = ((__pyx_v_mom2[__pyx_t_8]) + (__pyx_v_one_minus_beta2 * powf((__pyx_v_gradient[__pyx_v_j]), 2.0))); } /* "thinc/neural/ops.pyx":920 * for j in range(step_size): * mom2[j] += one_minus_beta2 * gradient[j] ** 2 * for j in range(step_size): # <<<<<<<<<<<<<< * buff[j] = sqrtf(mom2[j]) * for j in range(step_size): */ __pyx_t_7 = __pyx_v_step_size; __pyx_t_5 = __pyx_t_7; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_j = __pyx_t_6; /* "thinc/neural/ops.pyx":921 * mom2[j] += one_minus_beta2 * gradient[j] ** 2 * for j in range(step_size): * buff[j] = sqrtf(mom2[j]) # <<<<<<<<<<<<<< * for j in range(step_size): * buff[j] += eps */ (__pyx_v_buff[__pyx_v_j]) = sqrtf((__pyx_v_mom2[__pyx_v_j])); } /* "thinc/neural/ops.pyx":922 * for j in range(step_size): * buff[j] = sqrtf(mom2[j]) * for j in range(step_size): # <<<<<<<<<<<<<< * buff[j] += eps * for j in range(step_size): */ __pyx_t_7 = __pyx_v_step_size; __pyx_t_5 = __pyx_t_7; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_j = __pyx_t_6; /* "thinc/neural/ops.pyx":923 * buff[j] = sqrtf(mom2[j]) * for j in range(step_size): * buff[j] += eps # <<<<<<<<<<<<<< * for j in range(step_size): * buff[j] = mom1[j] / buff[j] */ __pyx_t_8 = __pyx_v_j; (__pyx_v_buff[__pyx_t_8]) = ((__pyx_v_buff[__pyx_t_8]) + __pyx_v_eps); } /* "thinc/neural/ops.pyx":924 * for j in range(step_size): * buff[j] += eps * for j in range(step_size): # <<<<<<<<<<<<<< * buff[j] = mom1[j] / buff[j] * for j in range(step_size): */ __pyx_t_7 = __pyx_v_step_size; __pyx_t_5 = __pyx_t_7; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_j = __pyx_t_6; /* "thinc/neural/ops.pyx":925 * buff[j] += eps * for j in range(step_size): * buff[j] = mom1[j] / buff[j] # <<<<<<<<<<<<<< * for j in range(step_size): * gradient[j] = buff[j] */ (__pyx_v_buff[__pyx_v_j]) = ((__pyx_v_mom1[__pyx_v_j]) / (__pyx_v_buff[__pyx_v_j])); } /* "thinc/neural/ops.pyx":926 * for j in range(step_size): * buff[j] = mom1[j] / buff[j] * for j in range(step_size): # <<<<<<<<<<<<<< * gradient[j] = buff[j] * mom1 += step_size */ __pyx_t_7 = __pyx_v_step_size; __pyx_t_5 = __pyx_t_7; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_j = __pyx_t_6; /* "thinc/neural/ops.pyx":927 * buff[j] = mom1[j] / buff[j] * for j in range(step_size): * gradient[j] = buff[j] # <<<<<<<<<<<<<< * mom1 += step_size * mom2 += step_size */ (__pyx_v_gradient[__pyx_v_j]) = (__pyx_v_buff[__pyx_v_j]); } /* "thinc/neural/ops.pyx":928 * for j in range(step_size): * gradient[j] = buff[j] * mom1 += step_size # <<<<<<<<<<<<<< * mom2 += step_size * gradient += step_size */ __pyx_v_mom1 = (__pyx_v_mom1 + __pyx_v_step_size); /* "thinc/neural/ops.pyx":929 * gradient[j] = buff[j] * mom1 += step_size * mom2 += step_size # <<<<<<<<<<<<<< 
* gradient += step_size * idx += step_size */ __pyx_v_mom2 = (__pyx_v_mom2 + __pyx_v_step_size); /* "thinc/neural/ops.pyx":930 * mom1 += step_size * mom2 += step_size * gradient += step_size # <<<<<<<<<<<<<< * idx += step_size * */ __pyx_v_gradient = (__pyx_v_gradient + __pyx_v_step_size); /* "thinc/neural/ops.pyx":931 * mom2 += step_size * gradient += step_size * idx += step_size # <<<<<<<<<<<<<< * * */ __pyx_v_idx = (__pyx_v_idx + __pyx_v_step_size); } /* "thinc/neural/ops.pyx":897 * * @cython.cdivision(True) * cdef void _adam_momentum(weight_t* gradient, weight_t* mom1, weight_t* mom2, # <<<<<<<<<<<<<< * int nr_weight, weight_t beta1, weight_t beta2, weight_t eps, * weight_t learn_rate) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops._adam_momentum", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":935 * * @cython.cdivision(True) * cdef void cpu_update_averages(weight_t* ema, # <<<<<<<<<<<<<< * const weight_t* weights, int nr_weight, weight_t t, weight_t max_decay) nogil: * cdef weight_t decay = (1.0 + t) / (10.0 + t) */ static void __pyx_f_5thinc_6neural_3ops_cpu_update_averages(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_ema, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_weights, int __pyx_v_nr_weight, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_t, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_max_decay) { __pyx_t_5thinc_8typedefs_weight_t __pyx_v_decay; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_one_minus_decay; int __pyx_v_i; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_update_averages", __pyx_f[0], 935, 1, __PYX_ERR(0, 935, __pyx_L1_error)); /* "thinc/neural/ops.pyx":937 * cdef void cpu_update_averages(weight_t* ema, * const weight_t* weights, int nr_weight, weight_t t, weight_t max_decay) nogil: * cdef weight_t decay = (1.0 + t) / (10.0 + t) # <<<<<<<<<<<<<< * if decay > max_decay: * decay = max_decay */ __pyx_v_decay = ((1.0 + __pyx_v_t) / (10.0 + __pyx_v_t)); /* "thinc/neural/ops.pyx":938 * const weight_t* weights, int nr_weight, weight_t t, weight_t max_decay) nogil: * cdef weight_t decay = (1.0 + t) / (10.0 + t) * if decay > max_decay: # <<<<<<<<<<<<<< * decay = max_decay * cdef weight_t one_minus_decay = 1-decay */ __pyx_t_1 = ((__pyx_v_decay > __pyx_v_max_decay) != 0); if (__pyx_t_1) { /* "thinc/neural/ops.pyx":939 * cdef weight_t decay = (1.0 + t) / (10.0 + t) * if decay > max_decay: * decay = max_decay # <<<<<<<<<<<<<< * cdef weight_t one_minus_decay = 1-decay * cdef int i */ __pyx_v_decay = __pyx_v_max_decay; /* "thinc/neural/ops.pyx":938 * const weight_t* weights, int nr_weight, weight_t t, weight_t max_decay) nogil: * cdef weight_t decay = (1.0 + t) / (10.0 + t) * if decay > max_decay: # <<<<<<<<<<<<<< * decay = max_decay * cdef weight_t one_minus_decay = 1-decay */ } /* "thinc/neural/ops.pyx":940 * if decay > max_decay: * decay = max_decay * cdef weight_t one_minus_decay = 1-decay # <<<<<<<<<<<<<< * cdef int i * for i in range(nr_weight): # num_threads=4, schedule='static'): */ __pyx_v_one_minus_decay = (1.0 - __pyx_v_decay); /* "thinc/neural/ops.pyx":942 * cdef weight_t one_minus_decay = 1-decay * cdef int i * for i in range(nr_weight): # num_threads=4, schedule='static'): # <<<<<<<<<<<<<< * ema[i] -= one_minus_decay * (ema[i] - weights[i]) * */ __pyx_t_2 = __pyx_v_nr_weight; __pyx_t_3 = 
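/*
 * For reference: _adam_momentum above applies the Adam moment updates in blocks of 64
 * (the source comments note the blockwise form is a bit faster and that the learning
 * rate adjustment is applied by the caller, so learn_rate is unused here). Per element
 * it is equivalent to the following sketch, assuming weight_t is a single-precision
 * float as the sqrtf/powf calls suggest (the helper name adam_momentum_ref is
 * illustrative only):
 *
 *     #include <math.h>
 *
 *     static void adam_momentum_ref(float* grad, float* mom1, float* mom2, int n,
 *                                   float beta1, float beta2, float eps) {
 *         for (int i = 0; i < n; ++i) {
 *             mom1[i] = beta1 * mom1[i] + (1.0f - beta1) * grad[i];
 *             mom2[i] = beta2 * mom2[i] + (1.0f - beta2) * grad[i] * grad[i];
 *             grad[i] = mom1[i] / (sqrtf(mom2[i]) + eps);  // gradient is overwritten with the step direction
 *         }
 *     }
 *
 * The surrounding cpu_update_averages keeps an exponential moving average of the
 * weights: decay = min((1 + t) / (10 + t), max_decay), and the in-place update
 * ema[i] -= (1 - decay) * (ema[i] - weights[i]) is the same as
 * ema[i] = decay * ema[i] + (1 - decay) * weights[i].
 */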
__pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "thinc/neural/ops.pyx":943 * cdef int i * for i in range(nr_weight): # num_threads=4, schedule='static'): * ema[i] -= one_minus_decay * (ema[i] - weights[i]) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_i; (__pyx_v_ema[__pyx_t_5]) = ((__pyx_v_ema[__pyx_t_5]) - (__pyx_v_one_minus_decay * ((__pyx_v_ema[__pyx_v_i]) - (__pyx_v_weights[__pyx_v_i])))); } /* "thinc/neural/ops.pyx":935 * * @cython.cdivision(True) * cdef void cpu_update_averages(weight_t* ema, # <<<<<<<<<<<<<< * const weight_t* weights, int nr_weight, weight_t t, weight_t max_decay) nogil: * cdef weight_t decay = (1.0 + t) / (10.0 + t) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_update_averages", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":946 * * * cdef void cpu_mish(weight_t* Y, const weight_t* X, float threshold, int N) nogil: # <<<<<<<<<<<<<< * cdef float one = 1. * for i in range(N): */ static void __pyx_f_5thinc_6neural_3ops_cpu_mish(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_Y, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_X, float __pyx_v_threshold, int __pyx_v_N) { float __pyx_v_one; int __pyx_v_i; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_mish", __pyx_f[0], 946, 1, __PYX_ERR(0, 946, __pyx_L1_error)); /* "thinc/neural/ops.pyx":947 * * cdef void cpu_mish(weight_t* Y, const weight_t* X, float threshold, int N) nogil: * cdef float one = 1. # <<<<<<<<<<<<<< * for i in range(N): * if X[i] >= threshold: */ __pyx_v_one = 1.; /* "thinc/neural/ops.pyx":948 * cdef void cpu_mish(weight_t* Y, const weight_t* X, float threshold, int N) nogil: * cdef float one = 1. * for i in range(N): # <<<<<<<<<<<<<< * if X[i] >= threshold: * Y[i] = X[i] */ __pyx_t_1 = __pyx_v_N; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "thinc/neural/ops.pyx":949 * cdef float one = 1. * for i in range(N): * if X[i] >= threshold: # <<<<<<<<<<<<<< * Y[i] = X[i] * else: */ __pyx_t_4 = (((__pyx_v_X[__pyx_v_i]) >= __pyx_v_threshold) != 0); if (__pyx_t_4) { /* "thinc/neural/ops.pyx":950 * for i in range(N): * if X[i] >= threshold: * Y[i] = X[i] # <<<<<<<<<<<<<< * else: * Y[i] = X[i] * tanhf(logf(one + expf(X[i]))) */ (__pyx_v_Y[__pyx_v_i]) = (__pyx_v_X[__pyx_v_i]); /* "thinc/neural/ops.pyx":949 * cdef float one = 1. * for i in range(N): * if X[i] >= threshold: # <<<<<<<<<<<<<< * Y[i] = X[i] * else: */ goto __pyx_L5; } /* "thinc/neural/ops.pyx":952 * Y[i] = X[i] * else: * Y[i] = X[i] * tanhf(logf(one + expf(X[i]))) # <<<<<<<<<<<<<< * * */ /*else*/ { (__pyx_v_Y[__pyx_v_i]) = ((__pyx_v_X[__pyx_v_i]) * tanhf(logf((__pyx_v_one + expf((__pyx_v_X[__pyx_v_i])))))); } __pyx_L5:; } /* "thinc/neural/ops.pyx":946 * * * cdef void cpu_mish(weight_t* Y, const weight_t* X, float threshold, int N) nogil: # <<<<<<<<<<<<<< * cdef float one = 1. * for i in range(N): */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_mish", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":955 * * * cdef void cpu_backprop_mish(weight_t* dX, # <<<<<<<<<<<<<< * const weight_t* dY, const weight_t* X, float threshold, int N) nogil: * cdef float one = 1. 
*/ static void __pyx_f_5thinc_6neural_3ops_cpu_backprop_mish(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_dX, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_dY, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_X, float __pyx_v_threshold, int __pyx_v_N) { CYTHON_UNUSED float __pyx_v_one; float __pyx_v_exp_x; float __pyx_v_exp_2x; float __pyx_v_exp_3x; float __pyx_v_omega; float __pyx_v_delta; int __pyx_v_i; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_x; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_backprop_mish", __pyx_f[0], 955, 1, __PYX_ERR(0, 955, __pyx_L1_error)); /* "thinc/neural/ops.pyx":957 * cdef void cpu_backprop_mish(weight_t* dX, * const weight_t* dY, const weight_t* X, float threshold, int N) nogil: * cdef float one = 1. # <<<<<<<<<<<<<< * cdef float exp_x, exp_2x, exp_3x, omega, delta * for i in range(N): */ __pyx_v_one = 1.; /* "thinc/neural/ops.pyx":959 * cdef float one = 1. * cdef float exp_x, exp_2x, exp_3x, omega, delta * for i in range(N): # <<<<<<<<<<<<<< * x = X[i] * if x >= threshold: */ __pyx_t_1 = __pyx_v_N; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "thinc/neural/ops.pyx":960 * cdef float exp_x, exp_2x, exp_3x, omega, delta * for i in range(N): * x = X[i] # <<<<<<<<<<<<<< * if x >= threshold: * dX[i] = dY[i] */ __pyx_v_x = (__pyx_v_X[__pyx_v_i]); /* "thinc/neural/ops.pyx":961 * for i in range(N): * x = X[i] * if x >= threshold: # <<<<<<<<<<<<<< * dX[i] = dY[i] * else: */ __pyx_t_4 = ((__pyx_v_x >= __pyx_v_threshold) != 0); if (__pyx_t_4) { /* "thinc/neural/ops.pyx":962 * x = X[i] * if x >= threshold: * dX[i] = dY[i] # <<<<<<<<<<<<<< * else: * exp_x = expf(x) */ (__pyx_v_dX[__pyx_v_i]) = (__pyx_v_dY[__pyx_v_i]); /* "thinc/neural/ops.pyx":961 * for i in range(N): * x = X[i] * if x >= threshold: # <<<<<<<<<<<<<< * dX[i] = dY[i] * else: */ goto __pyx_L5; } /* "thinc/neural/ops.pyx":964 * dX[i] = dY[i] * else: * exp_x = expf(x) # <<<<<<<<<<<<<< * exp_2x = expf(2*x) * exp_3x = expf(3*x) */ /*else*/ { __pyx_v_exp_x = expf(__pyx_v_x); /* "thinc/neural/ops.pyx":965 * else: * exp_x = expf(x) * exp_2x = expf(2*x) # <<<<<<<<<<<<<< * exp_3x = expf(3*x) * omega = (4. * (x+1)) + (4 * exp_2x) + exp_3x + exp_x * (4.*x+6) */ __pyx_v_exp_2x = expf((2.0 * __pyx_v_x)); /* "thinc/neural/ops.pyx":966 * exp_x = expf(x) * exp_2x = expf(2*x) * exp_3x = expf(3*x) # <<<<<<<<<<<<<< * omega = (4. * (x+1)) + (4 * exp_2x) + exp_3x + exp_x * (4.*x+6) * delta = 2. * exp_x + exp_2x + 2. */ __pyx_v_exp_3x = expf((3.0 * __pyx_v_x)); /* "thinc/neural/ops.pyx":967 * exp_2x = expf(2*x) * exp_3x = expf(3*x) * omega = (4. * (x+1)) + (4 * exp_2x) + exp_3x + exp_x * (4.*x+6) # <<<<<<<<<<<<<< * delta = 2. * exp_x + exp_2x + 2. * dX[i] = dY[i] * ((exp_x * omega) / (delta * delta)) */ __pyx_v_omega = ((((4. * (__pyx_v_x + 1.0)) + (4.0 * __pyx_v_exp_2x)) + __pyx_v_exp_3x) + (__pyx_v_exp_x * ((4. * __pyx_v_x) + 6.0))); /* "thinc/neural/ops.pyx":968 * exp_3x = expf(3*x) * omega = (4. * (x+1)) + (4 * exp_2x) + exp_3x + exp_x * (4.*x+6) * delta = 2. * exp_x + exp_2x + 2. # <<<<<<<<<<<<<< * dX[i] = dY[i] * ((exp_x * omega) / (delta * delta)) * */ __pyx_v_delta = (((2. * __pyx_v_exp_x) + __pyx_v_exp_2x) + 2.); /* "thinc/neural/ops.pyx":969 * omega = (4. * (x+1)) + (4 * exp_2x) + exp_3x + exp_x * (4.*x+6) * delta = 2. * exp_x + exp_2x + 2. 
* dX[i] = dY[i] * ((exp_x * omega) / (delta * delta)) # <<<<<<<<<<<<<< * * */ (__pyx_v_dX[__pyx_v_i]) = ((__pyx_v_dY[__pyx_v_i]) * ((__pyx_v_exp_x * __pyx_v_omega) / (__pyx_v_delta * __pyx_v_delta))); } __pyx_L5:; } /* "thinc/neural/ops.pyx":955 * * * cdef void cpu_backprop_mish(weight_t* dX, # <<<<<<<<<<<<<< * const weight_t* dY, const weight_t* X, float threshold, int N) nogil: * cdef float one = 1. */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_backprop_mish", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":977 * xp = cupy * * def matmul(self, x, y, out=None): # <<<<<<<<<<<<<< * return self.xp.matmul(x, y, out=out) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_1matmul(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_1matmul = {"matmul", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_1matmul, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_1matmul(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_x = 0; PyObject *__pyx_v_y = 0; PyObject *__pyx_v_out = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("matmul (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_out,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("matmul", 0, 3, 4, 1); __PYX_ERR(0, 977, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("matmul", 0, 3, 4, 2); __PYX_ERR(0, 977, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "matmul") < 0)) __PYX_ERR(0, 977, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_x = values[1]; __pyx_v_y = values[2]; 
__pyx_v_out = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("matmul", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 977, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.matmul", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_matmul(__pyx_self, __pyx_v_self, __pyx_v_x, __pyx_v_y, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_matmul(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_y, PyObject *__pyx_v_out) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__91) __Pyx_RefNannySetupContext("matmul", 0); __Pyx_TraceCall("matmul", __pyx_f[0], 977, 0, __PYX_ERR(0, 977, __pyx_L1_error)); /* "thinc/neural/ops.pyx":978 * * def matmul(self, x, y, out=None): * return self.xp.matmul(x, y, out=out) # <<<<<<<<<<<<<< * * def gemm(self, x, y, out=None, trans1=False, trans2=False): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_matmul); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_x); __Pyx_INCREF(__pyx_v_y); __Pyx_GIVEREF(__pyx_v_y); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_y); __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_out, __pyx_v_out) < 0) __PYX_ERR(0, 978, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":977 * xp = cupy * * def matmul(self, x, y, out=None): # <<<<<<<<<<<<<< * return self.xp.matmul(x, y, out=out) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.matmul", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":980 * return self.xp.matmul(x, y, out=out) * * def gemm(self, x, y, out=None, trans1=False, trans2=False): # <<<<<<<<<<<<<< * if trans1: * x = x.T */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_3gemm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_3gemm = {"gemm", 
(PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_3gemm, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_3gemm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_x = 0; PyObject *__pyx_v_y = 0; PyObject *__pyx_v_out = 0; PyObject *__pyx_v_trans1 = 0; PyObject *__pyx_v_trans2 = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gemm (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_out,&__pyx_n_s_trans1,&__pyx_n_s_trans2,0}; PyObject* values[6] = {0,0,0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_None)); values[4] = ((PyObject *)((PyObject *)Py_False)); values[5] = ((PyObject *)((PyObject *)Py_False)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gemm", 0, 3, 6, 1); __PYX_ERR(0, 980, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gemm", 0, 3, 6, 2); __PYX_ERR(0, 980, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_trans1); if (value) { values[4] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 5: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_trans2); if (value) { values[5] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gemm") < 0)) __PYX_ERR(0, 980, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_x = values[1]; __pyx_v_y = values[2]; __pyx_v_out = values[3]; __pyx_v_trans1 = values[4]; __pyx_v_trans2 = values[5]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gemm", 0, 3, 6, 
PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 980, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.gemm", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_2gemm(__pyx_self, __pyx_v_self, __pyx_v_x, __pyx_v_y, __pyx_v_out, __pyx_v_trans1, __pyx_v_trans2); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_2gemm(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_x, PyObject *__pyx_v_y, PyObject *__pyx_v_out, PyObject *__pyx_v_trans1, PyObject *__pyx_v_trans2) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__92) __Pyx_RefNannySetupContext("gemm", 0); __Pyx_TraceCall("gemm", __pyx_f[0], 980, 0, __PYX_ERR(0, 980, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_x); __Pyx_INCREF(__pyx_v_y); /* "thinc/neural/ops.pyx":981 * * def gemm(self, x, y, out=None, trans1=False, trans2=False): * if trans1: # <<<<<<<<<<<<<< * x = x.T * if trans2: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_trans1); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 981, __pyx_L1_error) if (__pyx_t_1) { /* "thinc/neural/ops.pyx":982 * def gemm(self, x, y, out=None, trans1=False, trans2=False): * if trans1: * x = x.T # <<<<<<<<<<<<<< * if trans2: * y = y.T */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_T); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 982, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF_SET(__pyx_v_x, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":981 * * def gemm(self, x, y, out=None, trans1=False, trans2=False): * if trans1: # <<<<<<<<<<<<<< * x = x.T * if trans2: */ } /* "thinc/neural/ops.pyx":983 * if trans1: * x = x.T * if trans2: # <<<<<<<<<<<<<< * y = y.T * if out is None: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_trans2); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 983, __pyx_L1_error) if (__pyx_t_1) { /* "thinc/neural/ops.pyx":984 * x = x.T * if trans2: * y = y.T # <<<<<<<<<<<<<< * if out is None: * return self.xp.dot(x, y) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_y, __pyx_n_s_T); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 984, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF_SET(__pyx_v_y, __pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":983 * if trans1: * x = x.T * if trans2: # <<<<<<<<<<<<<< * y = y.T * if out is None: */ } /* "thinc/neural/ops.pyx":985 * if trans2: * y = y.T * if out is None: # <<<<<<<<<<<<<< * return self.xp.dot(x, y) * else: */ __pyx_t_1 = (__pyx_v_out == Py_None); __pyx_t_3 = (__pyx_t_1 != 0); if (__pyx_t_3) { /* "thinc/neural/ops.pyx":986 * y = y.T * if out is None: * return self.xp.dot(x, y) # <<<<<<<<<<<<<< * else: * self.xp.dot(x, y, out=out) */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 986, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_dot); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 986, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; __pyx_t_6 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { 
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_6 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_x, __pyx_v_y}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 986, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_x, __pyx_v_y}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 986, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 986, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_v_x); __Pyx_INCREF(__pyx_v_y); __Pyx_GIVEREF(__pyx_v_y); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_v_y); __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 986, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":985 * if trans2: * y = y.T * if out is None: # <<<<<<<<<<<<<< * return self.xp.dot(x, y) * else: */ } /* "thinc/neural/ops.pyx":988 * return self.xp.dot(x, y) * else: * self.xp.dot(x, y, out=out) # <<<<<<<<<<<<<< * return out * */ /*else*/ { __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 988, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_dot); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 988, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 988, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_x); __Pyx_INCREF(__pyx_v_y); __Pyx_GIVEREF(__pyx_v_y); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_y); __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 988, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_out, __pyx_v_out) < 0) __PYX_ERR(0, 988, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_2, __pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 988, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":989 * else: * self.xp.dot(x, y, out=out) * return out # <<<<<<<<<<<<<< * * def asarray(self, X, dtype=None): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_out); __pyx_r = __pyx_v_out; goto __pyx_L0; } /* "thinc/neural/ops.pyx":980 * return self.xp.matmul(x, y, out=out) * * def gemm(self, x, y, out=None, trans1=False, trans2=False): # <<<<<<<<<<<<<< * if trans1: * x = x.T */ /* function exit code */ __pyx_L1_error:; 
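/*
 * For reference: CupyOps.matmul above simply forwards to self.xp.matmul(x, y, out=out),
 * and CupyOps.gemm transposes x and/or y when trans1/trans2 are true and then calls
 * self.xp.dot: with out=None it returns xp.dot(x, y) directly, otherwise it writes the
 * product into out and returns out. So, for example, gemm(x, y, trans1=True) computes
 * dot(x.T, y) on the CuPy arrays.
 */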
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.gemm", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_x); __Pyx_XDECREF(__pyx_v_y); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":991 * return out * * def asarray(self, X, dtype=None): # <<<<<<<<<<<<<< * if isinstance(X, cupy.ndarray): * return self.xp.asarray(X, dtype=dtype) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_5asarray(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_5asarray = {"asarray", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_5asarray, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_5asarray(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_dtype = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("asarray (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_dtype,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("asarray", 0, 2, 3, 1); __PYX_ERR(0, 991, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "asarray") < 0)) __PYX_ERR(0, 991, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = values[1]; __pyx_v_dtype = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("asarray", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 991, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.asarray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_4asarray(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_dtype); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } 
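/*
 * For reference: the CupyOps.asarray implementation that follows has three branches.
 * If X is already a cupy.ndarray it defers to self.xp.asarray(X, dtype=dtype); if X
 * exposes data_ptr() (the PyTorch-tensor case noted in the source comments) it wraps
 * the device memory with cupy.cuda.MemoryPointer(X.data_ptr()) and builds
 * self.xp.ndarray(shape, memptr=pointer, dtype=dtype), where the shape argument is
 * taken from X.stride(); otherwise it falls back to self.xp.array(X, dtype=dtype).
 */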
static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_4asarray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_dtype) { PyObject *__pyx_v_pointer = NULL; PyObject *__pyx_v_shape = NULL; PyObject *__pyx_v_array = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__93) __Pyx_RefNannySetupContext("asarray", 0); __Pyx_TraceCall("asarray", __pyx_f[0], 991, 0, __PYX_ERR(0, 991, __pyx_L1_error)); /* "thinc/neural/ops.pyx":992 * * def asarray(self, X, dtype=None): * if isinstance(X, cupy.ndarray): # <<<<<<<<<<<<<< * return self.xp.asarray(X, dtype=dtype) * elif hasattr(X, 'data_ptr'): */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_cupy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = PyObject_IsInstance(__pyx_v_X, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 992, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = (__pyx_t_3 != 0); if (__pyx_t_4) { /* "thinc/neural/ops.pyx":993 * def asarray(self, X, dtype=None): * if isinstance(X, cupy.ndarray): * return self.xp.asarray(X, dtype=dtype) # <<<<<<<<<<<<<< * elif hasattr(X, 'data_ptr'): * # Handles PyTorch Tensors */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_asarray); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_X); __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 993, __pyx_L1_error) __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":992 * * def asarray(self, X, dtype=None): * if isinstance(X, cupy.ndarray): # <<<<<<<<<<<<<< * return self.xp.asarray(X, dtype=dtype) * elif hasattr(X, 'data_ptr'): */ } /* "thinc/neural/ops.pyx":994 * if isinstance(X, cupy.ndarray): * return self.xp.asarray(X, dtype=dtype) * elif hasattr(X, 'data_ptr'): # <<<<<<<<<<<<<< * # Handles PyTorch Tensors * pointer = cupy.cuda.MemoryPointer(X.data_ptr()) */ __pyx_t_4 = __Pyx_HasAttr(__pyx_v_X, __pyx_n_s_data_ptr); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 994, __pyx_L1_error) __pyx_t_3 = (__pyx_t_4 != 0); if (__pyx_t_3) { /* "thinc/neural/ops.pyx":996 * elif hasattr(X, 'data_ptr'): * # Handles PyTorch Tensors * pointer 
= cupy.cuda.MemoryPointer(X.data_ptr()) # <<<<<<<<<<<<<< * shape = X.stride() * array = self.xp.ndarray(shape, memptr=pointer, dtype=dtype) */ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_cupy); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 996, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_cuda); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 996, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_MemoryPointer); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 996, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_data_ptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 996, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_2 = (__pyx_t_7) ? __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_7) : __Pyx_PyObject_CallNoArg(__pyx_t_1); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 996, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_6 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_1, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 996, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pointer = __pyx_t_6; __pyx_t_6 = 0; /* "thinc/neural/ops.pyx":997 * # Handles PyTorch Tensors * pointer = cupy.cuda.MemoryPointer(X.data_ptr()) * shape = X.stride() # <<<<<<<<<<<<<< * array = self.xp.ndarray(shape, memptr=pointer, dtype=dtype) * return array */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_stride); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_6 = (__pyx_t_2) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2) : __Pyx_PyObject_CallNoArg(__pyx_t_5); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_shape = __pyx_t_6; __pyx_t_6 = 0; /* "thinc/neural/ops.pyx":998 * pointer = cupy.cuda.MemoryPointer(X.data_ptr()) * shape = X.stride() * array = self.xp.ndarray(shape, memptr=pointer, dtype=dtype) # <<<<<<<<<<<<<< * return array * else: */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 998, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 998, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 998, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_shape); __pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 998, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_memptr, __pyx_v_pointer) < 0) __PYX_ERR(0, 998, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 998, __pyx_L1_error) __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 998, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_array = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":999 * shape = X.stride() * array = self.xp.ndarray(shape, memptr=pointer, dtype=dtype) * return array # <<<<<<<<<<<<<< * else: * return self.xp.array(X, dtype=dtype) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_array); __pyx_r = __pyx_v_array; goto __pyx_L0; /* "thinc/neural/ops.pyx":994 * if isinstance(X, cupy.ndarray): * return self.xp.asarray(X, dtype=dtype) * elif hasattr(X, 'data_ptr'): # <<<<<<<<<<<<<< * # Handles PyTorch Tensors * pointer = cupy.cuda.MemoryPointer(X.data_ptr()) */ } /* "thinc/neural/ops.pyx":1001 * return array * else: * return self.xp.array(X, dtype=dtype) # <<<<<<<<<<<<<< * * def maxout(self, X): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_X); __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 1001, __pyx_L1_error) __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; } /* "thinc/neural/ops.pyx":991 * return 
out * * def asarray(self, X, dtype=None): # <<<<<<<<<<<<<< * if isinstance(X, cupy.ndarray): * return self.xp.asarray(X, dtype=dtype) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.asarray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pointer); __Pyx_XDECREF(__pyx_v_shape); __Pyx_XDECREF(__pyx_v_array); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1003 * return self.xp.array(X, dtype=dtype) * * def maxout(self, X): # <<<<<<<<<<<<<< * return _custom_kernels.maxout(X) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_7maxout(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_7maxout = {"maxout", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_7maxout, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_7maxout(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("maxout (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maxout", 1, 2, 2, 1); __PYX_ERR(0, 1003, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maxout") < 0)) __PYX_ERR(0, 1003, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_self = values[0]; __pyx_v_X = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("maxout", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1003, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.maxout", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_6maxout(__pyx_self, __pyx_v_self, __pyx_v_X); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_6maxout(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations 
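/* Note on the preceding CupyOps.asarray implementation: per the .pyx
 * excerpt at ops.pyx:996-998, the PyTorch branch wraps X.data_ptr() in a
 * cupy.cuda.MemoryPointer and then passes shape = X.stride() to
 * xp.ndarray. For a contiguous tensor one would normally expect the
 * tensor's sizes rather than its strides here, so this looks like a
 * quirk of this thinc version rather than of the code generation.
 *
 * The generated function this comment sits in implements CupyOps.maxout,
 * which simply delegates to the module-level _custom_kernels.maxout(X)
 * CUDA kernel (ops.pyx:1004). Judging from backprop_maxout(dY, which, P)
 * further below, the forward kernel reduces over the trailing "pieces"
 * axis and records which piece won; a rough CuPy-level sketch of that
 * contract (the kernel body itself is not shown in this file):
 *
 *     def maxout(X):               # X assumed to have shape (N, O, P)
 *         which = X.argmax(axis=-1)
 *         best = X.max(axis=-1)
 *         return best, which
 */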
PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__94) __Pyx_RefNannySetupContext("maxout", 0); __Pyx_TraceCall("maxout", __pyx_f[0], 1003, 0, __PYX_ERR(0, 1003, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1004 * * def maxout(self, X): * return _custom_kernels.maxout(X) # <<<<<<<<<<<<<< * * def backprop_maxout(self, dY, which, int P): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1004, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_maxout); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1004, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_X) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_X); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1004, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1003 * return self.xp.array(X, dtype=dtype) * * def maxout(self, X): # <<<<<<<<<<<<<< * return _custom_kernels.maxout(X) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.maxout", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1006 * return _custom_kernels.maxout(X) * * def backprop_maxout(self, dY, which, int P): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_maxout(dY, which, P) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_9backprop_maxout(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_9backprop_maxout = {"backprop_maxout", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_9backprop_maxout, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_9backprop_maxout(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_dY = 0; PyObject *__pyx_v_which = 0; int __pyx_v_P; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_maxout (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_dY,&__pyx_n_s_which,&__pyx_n_s_P,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = 
PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_maxout", 1, 4, 4, 1); __PYX_ERR(0, 1006, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_which)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_maxout", 1, 4, 4, 2); __PYX_ERR(0, 1006, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_P)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_maxout", 1, 4, 4, 3); __PYX_ERR(0, 1006, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_maxout") < 0)) __PYX_ERR(0, 1006, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_self = values[0]; __pyx_v_dY = values[1]; __pyx_v_which = values[2]; __pyx_v_P = __Pyx_PyInt_As_int(values[3]); if (unlikely((__pyx_v_P == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1006, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_maxout", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1006, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_maxout", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_8backprop_maxout(__pyx_self, __pyx_v_self, __pyx_v_dY, __pyx_v_which, __pyx_v_P); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_8backprop_maxout(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_dY, PyObject *__pyx_v_which, int __pyx_v_P) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__95) __Pyx_RefNannySetupContext("backprop_maxout", 0); __Pyx_TraceCall("backprop_maxout", __pyx_f[0], 1006, 0, __PYX_ERR(0, 1006, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1007 * * def backprop_maxout(self, dY, which, int P): * return _custom_kernels.backprop_maxout(dY, which, P) # <<<<<<<<<<<<<< * * def relu(self, X, inplace=False): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1007, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_backprop_maxout); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1007, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = 
__Pyx_PyInt_From_int(__pyx_v_P); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1007, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; __pyx_t_5 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_5 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[4] = {__pyx_t_4, __pyx_v_dY, __pyx_v_which, __pyx_t_2}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 3+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1007, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[4] = {__pyx_t_4, __pyx_v_dY, __pyx_v_which, __pyx_t_2}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 3+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1007, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { __pyx_t_6 = PyTuple_New(3+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1007, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_v_dY); __Pyx_GIVEREF(__pyx_v_dY); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, __pyx_v_dY); __Pyx_INCREF(__pyx_v_which); __Pyx_GIVEREF(__pyx_v_which); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_v_which); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_6, 2+__pyx_t_5, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1007, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1006 * return _custom_kernels.maxout(X) * * def backprop_maxout(self, dY, which, int P): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_maxout(dY, which, P) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_maxout", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1009 * return _custom_kernels.backprop_maxout(dY, which, P) * * def relu(self, X, inplace=False): # <<<<<<<<<<<<<< * if not inplace: * return X * (X > 0) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_11relu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_11relu = {"relu", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_11relu, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_11relu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
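/* The relu / backprop_relu pair generated in this region follows the
 * .pyx source shown in the surrounding comments: relu returns
 * X * (X > 0) (or updates X in place with X *= (X > 0) when
 * inplace=True), and backprop_relu masks the incoming gradient with
 * (signal_out > 0) in exactly the same way. A rough CuPy-level sketch
 * of the same logic:
 *
 *     def relu(X, inplace=False):
 *         mask = X > 0
 *         if not inplace:
 *             return X * mask
 *         X *= mask
 *         return X
 *
 *     def backprop_relu(delta_, signal_out, inplace=False):
 *         mask = signal_out > 0
 *         if not inplace:
 *             return delta_ * mask
 *         delta_ *= mask
 *         return delta_
 */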
PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("relu (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_inplace,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_False)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("relu", 0, 2, 3, 1); __PYX_ERR(0, 1009, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "relu") < 0)) __PYX_ERR(0, 1009, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = values[1]; __pyx_v_inplace = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("relu", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1009, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.relu", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_10relu(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_inplace); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_10relu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_inplace) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__96) __Pyx_RefNannySetupContext("relu", 0); __Pyx_TraceCall("relu", __pyx_f[0], 1009, 0, __PYX_ERR(0, 1009, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_X); /* "thinc/neural/ops.pyx":1010 * * def relu(self, X, inplace=False): * if not inplace: # <<<<<<<<<<<<<< * return X * (X > 0) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 1010, __pyx_L1_error) __pyx_t_2 = ((!__pyx_t_1) != 0); if (__pyx_t_2) { /* "thinc/neural/ops.pyx":1011 * def relu(self, X, inplace=False): * if not inplace: * return X * (X > 0) # <<<<<<<<<<<<<< * else: * X *= (X > 0) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyObject_RichCompare(__pyx_v_X, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1011, 
__pyx_L1_error) __pyx_t_4 = PyNumber_Multiply(__pyx_v_X, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1011, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1010 * * def relu(self, X, inplace=False): * if not inplace: # <<<<<<<<<<<<<< * return X * (X > 0) * else: */ } /* "thinc/neural/ops.pyx":1013 * return X * (X > 0) * else: * X *= (X > 0) # <<<<<<<<<<<<<< * return X * */ /*else*/ { __pyx_t_4 = PyObject_RichCompare(__pyx_v_X, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1013, __pyx_L1_error) __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_v_X, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF_SET(__pyx_v_X, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":1014 * else: * X *= (X > 0) * return X # <<<<<<<<<<<<<< * * def backprop_relu(self, delta_, signal_out, inplace=False): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_X); __pyx_r = __pyx_v_X; goto __pyx_L0; } /* "thinc/neural/ops.pyx":1009 * return _custom_kernels.backprop_maxout(dY, which, P) * * def relu(self, X, inplace=False): # <<<<<<<<<<<<<< * if not inplace: * return X * (X > 0) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.relu", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_X); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1016 * return X * * def backprop_relu(self, delta_, signal_out, inplace=False): # <<<<<<<<<<<<<< * if not inplace: * return delta_ * (signal_out > 0) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_13backprop_relu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_13backprop_relu = {"backprop_relu", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_13backprop_relu, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_13backprop_relu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_delta_ = 0; PyObject *__pyx_v_signal_out = 0; PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_relu (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_delta,&__pyx_n_s_signal_out_2,&__pyx_n_s_inplace,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_False)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto 
__pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_delta)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_relu", 0, 3, 4, 1); __PYX_ERR(0, 1016, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_signal_out_2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_relu", 0, 3, 4, 2); __PYX_ERR(0, 1016, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_relu") < 0)) __PYX_ERR(0, 1016, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_delta_ = values[1]; __pyx_v_signal_out = values[2]; __pyx_v_inplace = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_relu", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1016, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_relu", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_12backprop_relu(__pyx_self, __pyx_v_self, __pyx_v_delta_, __pyx_v_signal_out, __pyx_v_inplace); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_12backprop_relu(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_delta_, PyObject *__pyx_v_signal_out, PyObject *__pyx_v_inplace) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__97) __Pyx_RefNannySetupContext("backprop_relu", 0); __Pyx_TraceCall("backprop_relu", __pyx_f[0], 1016, 0, __PYX_ERR(0, 1016, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_delta_); /* "thinc/neural/ops.pyx":1017 * * def backprop_relu(self, delta_, signal_out, inplace=False): * if not inplace: # <<<<<<<<<<<<<< * return delta_ * (signal_out > 0) * delta_ *= (signal_out > 0) */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 1017, __pyx_L1_error) __pyx_t_2 = ((!__pyx_t_1) != 0); if (__pyx_t_2) { /* "thinc/neural/ops.pyx":1018 * def backprop_relu(self, delta_, signal_out, inplace=False): * if not inplace: * return delta_ * (signal_out > 0) # <<<<<<<<<<<<<< * delta_ *= (signal_out > 0) * return delta_ */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyObject_RichCompare(__pyx_v_signal_out, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1018, __pyx_L1_error) __pyx_t_4 = PyNumber_Multiply(__pyx_v_delta_, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* 
"thinc/neural/ops.pyx":1017 * * def backprop_relu(self, delta_, signal_out, inplace=False): * if not inplace: # <<<<<<<<<<<<<< * return delta_ * (signal_out > 0) * delta_ *= (signal_out > 0) */ } /* "thinc/neural/ops.pyx":1019 * if not inplace: * return delta_ * (signal_out > 0) * delta_ *= (signal_out > 0) # <<<<<<<<<<<<<< * return delta_ * */ __pyx_t_4 = PyObject_RichCompare(__pyx_v_signal_out, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1019, __pyx_L1_error) __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_v_delta_, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1019, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF_SET(__pyx_v_delta_, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":1020 * return delta_ * (signal_out > 0) * delta_ *= (signal_out > 0) * return delta_ # <<<<<<<<<<<<<< * * def selu(self, X, inplace=True): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_delta_); __pyx_r = __pyx_v_delta_; goto __pyx_L0; /* "thinc/neural/ops.pyx":1016 * return X * * def backprop_relu(self, delta_, signal_out, inplace=False): # <<<<<<<<<<<<<< * if not inplace: * return delta_ * (signal_out > 0) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_relu", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_delta_); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1022 * return delta_ * * def selu(self, X, inplace=True): # <<<<<<<<<<<<<< * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_15selu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_15selu = {"selu", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_15selu, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_15selu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("selu (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_inplace,0}; PyObject* values[3] = {0,0,0}; values[2] = ((PyObject *)((PyObject *)Py_True)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("selu", 0, 2, 3, 1); __PYX_ERR(0, 1022, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = 
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "selu") < 0)) __PYX_ERR(0, 1022, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = values[1]; __pyx_v_inplace = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("selu", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1022, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.selu", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_14selu(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_inplace); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_14selu(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_inplace) { float __pyx_v_scale; float __pyx_v_alpha; PyObject *__pyx_v_out = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__98) __Pyx_RefNannySetupContext("selu", 0); __Pyx_TraceCall("selu", __pyx_f[0], 1022, 0, __PYX_ERR(0, 1022, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1023 * * def selu(self, X, inplace=True): * cdef float scale = 1.0507009873554805 # <<<<<<<<<<<<<< * cdef float alpha = 1.6732632423543772 * out = scale * self.xp.where(X>=0., X, alpha * (self.xp.exp(X)-1.)) */ __pyx_v_scale = 1.0507009873554805; /* "thinc/neural/ops.pyx":1024 * def selu(self, X, inplace=True): * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 # <<<<<<<<<<<<<< * out = scale * self.xp.where(X>=0., X, alpha * (self.xp.exp(X)-1.)) * if inplace: */ __pyx_v_alpha = 1.6732632423543772; /* "thinc/neural/ops.pyx":1025 * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 * out = scale * self.xp.where(X>=0., X, alpha * (self.xp.exp(X)-1.)) # <<<<<<<<<<<<<< * if inplace: * copy_array(X, out) */ __pyx_t_1 = PyFloat_FromDouble(__pyx_v_scale); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_where); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_RichCompare(__pyx_v_X, __pyx_float_0_, Py_GE); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1025, __pyx_L1_error) __pyx_t_5 = PyFloat_FromDouble(__pyx_v_alpha); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1025, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_exp); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_8); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_8, function); } } __pyx_t_6 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_7, __pyx_v_X) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_v_X); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyFloat_SubtractObjC(__pyx_t_6, __pyx_float_1_, 1., 0, 0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_Multiply(__pyx_t_5, __pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = NULL; __pyx_t_9 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); __pyx_t_9 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[4] = {__pyx_t_8, __pyx_t_3, __pyx_v_X, __pyx_t_6}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_9, 3+__pyx_t_9); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[4] = {__pyx_t_8, __pyx_t_3, __pyx_v_X, __pyx_t_6}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_9, 3+__pyx_t_9); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_5 = PyTuple_New(3+__pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_8) { __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_8); __pyx_t_8 = NULL; } __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_9, __pyx_t_3); __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_9, __pyx_v_X); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_5, 2+__pyx_t_9, __pyx_t_6); __pyx_t_3 = 0; __pyx_t_6 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; 
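/* The temporaries built above assemble the SELU forward expression from
 * ops.pyx:1025,
 *
 *     out = scale * xp.where(X >= 0., X, alpha * (xp.exp(X) - 1.))
 *
 * with scale = 1.0507009873554805 and alpha = 1.6732632423543772, the
 * standard SELU constants. The PyNumber_Multiply call just above is the
 * outer multiplication of the where(...) result by scale; when inplace
 * is true the result is additionally copied back into X via
 * copy_array(X, out) a few statements further on. */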
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_out = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":1026 * cdef float alpha = 1.6732632423543772 * out = scale * self.xp.where(X>=0., X, alpha * (self.xp.exp(X)-1.)) * if inplace: # <<<<<<<<<<<<<< * copy_array(X, out) * return out */ __pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_10 < 0)) __PYX_ERR(0, 1026, __pyx_L1_error) if (__pyx_t_10) { /* "thinc/neural/ops.pyx":1027 * out = scale * self.xp.where(X>=0., X, alpha * (self.xp.exp(X)-1.)) * if inplace: * copy_array(X, out) # <<<<<<<<<<<<<< * return out * */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_copy_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = NULL; __pyx_t_9 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_9 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_1, __pyx_v_X, __pyx_v_out}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1027, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_1, __pyx_v_X, __pyx_v_out}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1027, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_1) { __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __pyx_t_1 = NULL; } __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_9, __pyx_v_X); __Pyx_INCREF(__pyx_v_out); __Pyx_GIVEREF(__pyx_v_out); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_9, __pyx_v_out); __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":1026 * cdef float alpha = 1.6732632423543772 * out = scale * self.xp.where(X>=0., X, alpha * (self.xp.exp(X)-1.)) * if inplace: # <<<<<<<<<<<<<< * copy_array(X, out) * return out */ } /* "thinc/neural/ops.pyx":1028 * if inplace: * copy_array(X, out) * return out # <<<<<<<<<<<<<< * * def backprop_selu(self, delta, signal_in, */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_out); __pyx_r = __pyx_v_out; goto __pyx_L0; /* "thinc/neural/ops.pyx":1022 * return delta_ * * def selu(self, X, inplace=True): # <<<<<<<<<<<<<< * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.selu", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_out); __Pyx_XGIVEREF(__pyx_r); 
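/* The remaining CupyOps methods generated below follow the same pattern.
 * backprop_selu evaluates the derivative of the expression above,
 *
 *     d = delta * xp.where(signal_in >= 0, scale,
 *                          scale * alpha * xp.exp(signal_in))
 *
 * again with an optional copy_array(delta, out) when inplace is true,
 * while mish and backprop_mish delegate to
 * _custom_kernels.mish(X, threshold=threshold, out=out) and
 * _custom_kernels.backprop_mish(dY, X, threshold=threshold, out=out).
 * Mish itself is x * tanh(softplus(x)); the threshold argument
 * presumably lets the kernel fall back to returning x directly for
 * large inputs for numerical stability, but the kernel bodies are not
 * part of this file. */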
__Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1030 * return out * * def backprop_selu(self, delta, signal_in, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the SELU transformation */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_17backprop_selu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_17backprop_selu = {"backprop_selu", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_17backprop_selu, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_17backprop_selu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_delta = 0; PyObject *__pyx_v_signal_in = 0; PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_selu (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_delta_2,&__pyx_n_s_signal_in_2,&__pyx_n_s_inplace,0}; PyObject* values[4] = {0,0,0,0}; /* "thinc/neural/ops.pyx":1031 * * def backprop_selu(self, delta, signal_in, * inplace=True): # <<<<<<<<<<<<<< * # Backprop the SELU transformation * cdef float scale = 1.0507009873554805 */ values[3] = ((PyObject *)((PyObject *)Py_True)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_delta_2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_selu", 0, 3, 4, 1); __PYX_ERR(0, 1030, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_signal_in_2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_selu", 0, 3, 4, 2); __PYX_ERR(0, 1030, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_selu") < 0)) __PYX_ERR(0, 1030, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_delta = values[1]; __pyx_v_signal_in = values[2]; __pyx_v_inplace = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_selu", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 
1030, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_selu", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_16backprop_selu(__pyx_self, __pyx_v_self, __pyx_v_delta, __pyx_v_signal_in, __pyx_v_inplace); /* "thinc/neural/ops.pyx":1030 * return out * * def backprop_selu(self, delta, signal_in, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the SELU transformation */ /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_16backprop_selu(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_delta, PyObject *__pyx_v_signal_in, PyObject *__pyx_v_inplace) { float __pyx_v_scale; float __pyx_v_alpha; PyObject *__pyx_v_out = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__99) __Pyx_RefNannySetupContext("backprop_selu", 0); __Pyx_TraceCall("backprop_selu", __pyx_f[0], 1030, 0, __PYX_ERR(0, 1030, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1033 * inplace=True): * # Backprop the SELU transformation * cdef float scale = 1.0507009873554805 # <<<<<<<<<<<<<< * cdef float alpha = 1.6732632423543772 * out = delta * self.xp.where(signal_in >= 0, scale, */ __pyx_v_scale = 1.0507009873554805; /* "thinc/neural/ops.pyx":1034 * # Backprop the SELU transformation * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 # <<<<<<<<<<<<<< * out = delta * self.xp.where(signal_in >= 0, scale, * scale * alpha * self.xp.exp(signal_in)) */ __pyx_v_alpha = 1.6732632423543772; /* "thinc/neural/ops.pyx":1035 * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 * out = delta * self.xp.where(signal_in >= 0, scale, # <<<<<<<<<<<<<< * scale * alpha * self.xp.exp(signal_in)) * if inplace: */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1035, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_where); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1035, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_RichCompare(__pyx_v_signal_in, __pyx_int_0, Py_GE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1035, __pyx_L1_error) __pyx_t_4 = PyFloat_FromDouble(__pyx_v_scale); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1035, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); /* "thinc/neural/ops.pyx":1036 * cdef float alpha = 1.6732632423543772 * out = delta * self.xp.where(signal_in >= 0, scale, * scale * alpha * self.xp.exp(signal_in)) # <<<<<<<<<<<<<< * if inplace: * copy_array(delta, out) */ __pyx_t_5 = PyFloat_FromDouble((__pyx_v_scale * __pyx_v_alpha)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1036, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1036, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_exp); if (unlikely(!__pyx_t_8)) 
__PYX_ERR(0, 1036, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_8); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_8, function); } } __pyx_t_6 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_7, __pyx_v_signal_in) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_v_signal_in); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1036, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_Multiply(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1036, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_9 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_9 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[4] = {__pyx_t_6, __pyx_t_2, __pyx_t_4, __pyx_t_8}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_9, 3+__pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1035, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[4] = {__pyx_t_6, __pyx_t_2, __pyx_t_4, __pyx_t_8}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_9, 3+__pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1035, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } else #endif { __pyx_t_5 = PyTuple_New(3+__pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1035, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_9, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_9, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_5, 2+__pyx_t_9, __pyx_t_8); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_8 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1035, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":1035 * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 * out = delta * self.xp.where(signal_in >= 0, scale, # <<<<<<<<<<<<<< * scale * alpha * self.xp.exp(signal_in)) * if inplace: */ __pyx_t_3 = PyNumber_Multiply(__pyx_v_delta, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1035, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_out = __pyx_t_3; __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":1037 * out = delta * self.xp.where(signal_in >= 0, scale, * scale * alpha * 
self.xp.exp(signal_in)) * if inplace: # <<<<<<<<<<<<<< * copy_array(delta, out) * return out */ __pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_10 < 0)) __PYX_ERR(0, 1037, __pyx_L1_error) if (__pyx_t_10) { /* "thinc/neural/ops.pyx":1038 * scale * alpha * self.xp.exp(signal_in)) * if inplace: * copy_array(delta, out) # <<<<<<<<<<<<<< * return out * */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_copy_array); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1038, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_9 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); __pyx_t_9 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_v_delta, __pyx_v_out}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1038, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_v_delta, __pyx_v_out}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1038, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_9); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1038, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_v_delta); __Pyx_GIVEREF(__pyx_v_delta); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_9, __pyx_v_delta); __Pyx_INCREF(__pyx_v_out); __Pyx_GIVEREF(__pyx_v_out); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_9, __pyx_v_out); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1038, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":1037 * out = delta * self.xp.where(signal_in >= 0, scale, * scale * alpha * self.xp.exp(signal_in)) * if inplace: # <<<<<<<<<<<<<< * copy_array(delta, out) * return out */ } /* "thinc/neural/ops.pyx":1039 * if inplace: * copy_array(delta, out) * return out # <<<<<<<<<<<<<< * * def mish(self, X, threshold=5, out=None): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_out); __pyx_r = __pyx_v_out; goto __pyx_L0; /* "thinc/neural/ops.pyx":1030 * return out * * def backprop_selu(self, delta, signal_in, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the SELU transformation */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_selu", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1041 * return out * * def mish(self, X, threshold=5, out=None): # <<<<<<<<<<<<<< * return 
_custom_kernels.mish(X, threshold=threshold, out=out) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_19mish(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_19mish = {"mish", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_19mish, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_19mish(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_threshold = 0; PyObject *__pyx_v_out = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mish (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_threshold,&__pyx_n_s_out,0}; PyObject* values[4] = {0,0,0,0}; values[2] = ((PyObject *)((PyObject *)__pyx_int_5)); values[3] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mish", 0, 2, 4, 1); __PYX_ERR(0, 1041, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mish") < 0)) __PYX_ERR(0, 1041, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_X = values[1]; __pyx_v_threshold = values[2]; __pyx_v_out = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("mish", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1041, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.mish", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_18mish(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_threshold, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
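/* Reference sketch (editorial note, not Cython output): the backprop_selu code above
 * implements the gradient quoted from ops.pyx:1035-1036, i.e.
 *     d_in = delta * where(signal_in >= 0, scale, scale * alpha * exp(signal_in))
 * with scale = 1.0507009873554805 and alpha = 1.6732632423543772.
 *
 * The mish() method that follows simply forwards to a CuPy kernel in _custom_kernels.
 * Mish itself is x * tanh(softplus(x)); the `threshold` argument presumably switches
 * to the identity for large inputs, where tanh(softplus(x)) ~= 1. A minimal NumPy
 * sketch of the forward pass under that assumption (mish_reference is an
 * illustrative helper, not part of thinc):
 *
 *     import numpy as np
 *
 *     def mish_reference(X, threshold=5.0):
 *         Y = X * np.tanh(np.logaddexp(0.0, X))  # x * tanh(softplus(x))
 *         return np.where(X >= threshold, X, Y)  # assumed cutoff behaviour
 */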
*__pyx_pf_5thinc_6neural_3ops_7CupyOps_18mish(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__100) __Pyx_RefNannySetupContext("mish", 0); __Pyx_TraceCall("mish", __pyx_f[0], 1041, 0, __PYX_ERR(0, 1041, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1042 * * def mish(self, X, threshold=5, out=None): * return _custom_kernels.mish(X, threshold=threshold, out=out) # <<<<<<<<<<<<<< * * def backprop_mish(self, dY, X, threshold=5, out=None): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1042, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_mish); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1042, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1042, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_X); __pyx_t_3 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1042, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_threshold, __pyx_v_threshold) < 0) __PYX_ERR(0, 1042, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_out, __pyx_v_out) < 0) __PYX_ERR(0, 1042, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1042, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1041 * return out * * def mish(self, X, threshold=5, out=None): # <<<<<<<<<<<<<< * return _custom_kernels.mish(X, threshold=threshold, out=out) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.mish", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1044 * return _custom_kernels.mish(X, threshold=threshold, out=out) * * def backprop_mish(self, dY, X, threshold=5, out=None): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_mish(dY, X, threshold=threshold, out=out) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_21backprop_mish(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_21backprop_mish = {"backprop_mish", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_21backprop_mish, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_21backprop_mish(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_dY = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_threshold = 0; PyObject 
*__pyx_v_out = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_mish (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_dY,&__pyx_n_s_X,&__pyx_n_s_threshold,&__pyx_n_s_out,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)((PyObject *)__pyx_int_5)); values[4] = ((PyObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_mish", 0, 3, 5, 1); __PYX_ERR(0, 1044, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_mish", 0, 3, 5, 2); __PYX_ERR(0, 1044, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_mish") < 0)) __PYX_ERR(0, 1044, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_dY = values[1]; __pyx_v_X = values[2]; __pyx_v_threshold = values[3]; __pyx_v_out = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_mish", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1044, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_mish", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_20backprop_mish(__pyx_self, __pyx_v_self, __pyx_v_dY, __pyx_v_X, __pyx_v_threshold, __pyx_v_out); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_20backprop_mish(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_dY, PyObject *__pyx_v_X, PyObject *__pyx_v_threshold, PyObject *__pyx_v_out) { PyObject *__pyx_r 
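/* Reference sketch (editorial note, not Cython output): backprop_mish also defers to
 * a CuPy kernel. Analytically, with sp = softplus(x) and sig = sigmoid(x), the Mish
 * derivative is tanh(sp) + x * sig * (1 - tanh(sp)**2), so the kernel presumably
 * computes something equivalent to the illustrative helper below (the threshold
 * cutoff is an assumption mirroring the forward pass):
 *
 *     import numpy as np
 *
 *     def backprop_mish_reference(dY, X, threshold=5.0):
 *         sp = np.logaddexp(0.0, X)           # softplus
 *         tsp = np.tanh(sp)
 *         sig = 1.0 / (1.0 + np.exp(-X))      # sigmoid
 *         dmish = tsp + X * sig * (1.0 - tsp ** 2)
 *         return dY * np.where(X >= threshold, 1.0, dmish)  # assumed cutoff
 */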
= NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__101) __Pyx_RefNannySetupContext("backprop_mish", 0); __Pyx_TraceCall("backprop_mish", __pyx_f[0], 1044, 0, __PYX_ERR(0, 1044, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1045 * * def backprop_mish(self, dY, X, threshold=5, out=None): * return _custom_kernels.backprop_mish(dY, X, threshold=threshold, out=out) # <<<<<<<<<<<<<< * * def clip_gradient(self, gradient, threshold): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1045, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_backprop_mish); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1045, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1045, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_dY); __Pyx_GIVEREF(__pyx_v_dY); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_dY); __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_X); __pyx_t_3 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1045, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_threshold, __pyx_v_threshold) < 0) __PYX_ERR(0, 1045, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_out, __pyx_v_out) < 0) __PYX_ERR(0, 1045, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1045, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1044 * return _custom_kernels.mish(X, threshold=threshold, out=out) * * def backprop_mish(self, dY, X, threshold=5, out=None): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_mish(dY, X, threshold=threshold, out=out) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_mish", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1047 * return _custom_kernels.backprop_mish(dY, X, threshold=threshold, out=out) * * def clip_gradient(self, gradient, threshold): # <<<<<<<<<<<<<< * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_23clip_gradient(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_23clip_gradient = {"clip_gradient", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_23clip_gradient, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_23clip_gradient(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_gradient = 0; PyObject *__pyx_v_threshold = 0; int __pyx_lineno = 0; 
const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("clip_gradient (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_gradient,&__pyx_n_s_threshold,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("clip_gradient", 1, 3, 3, 1); __PYX_ERR(0, 1047, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("clip_gradient", 1, 3, 3, 2); __PYX_ERR(0, 1047, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "clip_gradient") < 0)) __PYX_ERR(0, 1047, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_gradient = values[1]; __pyx_v_threshold = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("clip_gradient", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1047, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.clip_gradient", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_22clip_gradient(__pyx_self, __pyx_v_self, __pyx_v_gradient, __pyx_v_threshold); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_22clip_gradient(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_gradient, PyObject *__pyx_v_threshold) { PyObject *__pyx_v_xp = NULL; PyObject *__pyx_v_grad_norm = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__102) __Pyx_RefNannySetupContext("clip_gradient", 0); __Pyx_TraceCall("clip_gradient", __pyx_f[0], 1047, 0, __PYX_ERR(0, 1047, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_gradient); /* "thinc/neural/ops.pyx":1048 * * def clip_gradient(self, gradient, threshold): * xp = get_array_module(gradient) # <<<<<<<<<<<<<< * grad_norm = xp.linalg.norm(gradient) * if grad_norm >= threshold: */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_get_array_module); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1048, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if 
(CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_gradient) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_gradient); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1048, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_xp = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":1049 * def clip_gradient(self, gradient, threshold): * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) # <<<<<<<<<<<<<< * if grad_norm >= threshold: * gradient *= threshold / grad_norm */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_xp, __pyx_n_s_linalg); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1049, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_norm); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1049, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_gradient) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_gradient); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1049, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_grad_norm = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":1050 * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) * if grad_norm >= threshold: # <<<<<<<<<<<<<< * gradient *= threshold / grad_norm * */ __pyx_t_1 = PyObject_RichCompare(__pyx_v_grad_norm, __pyx_v_threshold, Py_GE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1050, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1050, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_4) { /* "thinc/neural/ops.pyx":1051 * grad_norm = xp.linalg.norm(gradient) * if grad_norm >= threshold: * gradient *= threshold / grad_norm # <<<<<<<<<<<<<< * * def seq2col(self, seq, int nW): */ __pyx_t_1 = __Pyx_PyNumber_Divide(__pyx_v_threshold, __pyx_v_grad_norm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1051, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_v_gradient, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1051, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_gradient, __pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":1050 * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) * if grad_norm >= threshold: # <<<<<<<<<<<<<< * gradient *= threshold / grad_norm * */ } /* "thinc/neural/ops.pyx":1047 * return _custom_kernels.backprop_mish(dY, X, threshold=threshold, out=out) * * def clip_gradient(self, gradient, threshold): # <<<<<<<<<<<<<< * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; 
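/* Reference sketch (editorial note, not Cython output): clip_gradient above rescales
 * the whole gradient when its L2 norm reaches the threshold, exactly as quoted from
 * ops.pyx:1048-1051. The rescale is an in-place multiply on the array object and the
 * method returns None. A NumPy equivalent (clip_gradient_reference is an
 * illustrative helper, not part of thinc):
 *
 *     import numpy as np
 *
 *     def clip_gradient_reference(gradient, threshold):
 *         grad_norm = np.linalg.norm(gradient)
 *         if grad_norm >= threshold:
 *             gradient *= threshold / grad_norm   # mutates the array in place
 */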
__Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.clip_gradient", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_xp); __Pyx_XDECREF(__pyx_v_grad_norm); __Pyx_XDECREF(__pyx_v_gradient); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1053 * gradient *= threshold / grad_norm * * def seq2col(self, seq, int nW): # <<<<<<<<<<<<<< * '''Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence. * The new sequence is constructed by concatenating nW preceding and succeeding */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_25seq2col(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_5thinc_6neural_3ops_7CupyOps_24seq2col[] = "Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence.\n The new sequence is constructed by concatenating nW preceding and succeeding\n vectors onto each column in the sequence, to extract a window of features.\n "; static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_25seq2col = {"seq2col", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_25seq2col, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5thinc_6neural_3ops_7CupyOps_24seq2col}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_25seq2col(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_seq = 0; int __pyx_v_nW; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("seq2col (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_seq,&__pyx_n_s_nW,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seq)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("seq2col", 1, 3, 3, 1); __PYX_ERR(0, 1053, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nW)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("seq2col", 1, 3, 3, 2); __PYX_ERR(0, 1053, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "seq2col") < 0)) __PYX_ERR(0, 1053, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_seq = values[1]; __pyx_v_nW = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_nW == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1053, __pyx_L3_error) } 
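/* Reference sketch (editorial note, not Cython output): per the docstring above,
 * seq2col concatenates the nW preceding and nW succeeding rows onto each row, giving
 * an (M, N*(2*nW+1)) array; this build routes the work to a CuPy kernel. A NumPy
 * sketch, assuming positions outside the sequence contribute zero vectors
 * (seq2col_reference is an illustrative helper, not part of thinc):
 *
 *     import numpy as np
 *
 *     def seq2col_reference(seq, nW):
 *         M, N = seq.shape
 *         cols = np.zeros((M, (2 * nW + 1) * N), dtype=seq.dtype)
 *         for i in range(M):
 *             for w, j in enumerate(range(i - nW, i + nW + 1)):
 *                 if 0 <= j < M:
 *                     cols[i, w * N:(w + 1) * N] = seq[j]
 *         return cols
 */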
goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("seq2col", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1053, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_24seq2col(__pyx_self, __pyx_v_self, __pyx_v_seq, __pyx_v_nW); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_24seq2col(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_seq, int __pyx_v_nW) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__103) __Pyx_RefNannySetupContext("seq2col", 0); __Pyx_TraceCall("seq2col", __pyx_f[0], 1053, 0, __PYX_ERR(0, 1053, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1058 * vectors onto each column in the sequence, to extract a window of features. * ''' * return _custom_kernels.seq2col(seq, nW) # <<<<<<<<<<<<<< * * def backprop_seq2col(self, dY, int nW): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1058, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_seq2col); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1058, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_nW); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1058, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; __pyx_t_5 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_5 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_seq, __pyx_t_2}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1058, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_seq, __pyx_t_2}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1058, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1058, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_v_seq); __Pyx_GIVEREF(__pyx_v_seq); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, __pyx_v_seq); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, 
NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1058, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1053 * gradient *= threshold / grad_norm * * def seq2col(self, seq, int nW): # <<<<<<<<<<<<<< * '''Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence. * The new sequence is constructed by concatenating nW preceding and succeeding */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1060 * return _custom_kernels.seq2col(seq, nW) * * def backprop_seq2col(self, dY, int nW): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_seq2col(dY, nW) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_27backprop_seq2col(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_27backprop_seq2col = {"backprop_seq2col", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_27backprop_seq2col, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_27backprop_seq2col(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_dY = 0; int __pyx_v_nW; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_seq2col (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_dY,&__pyx_n_s_nW,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_seq2col", 1, 3, 3, 1); __PYX_ERR(0, 1060, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nW)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_seq2col", 1, 3, 3, 2); __PYX_ERR(0, 1060, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_seq2col") < 0)) __PYX_ERR(0, 1060, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_dY = values[1]; 
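/* Reference sketch (editorial note, not Cython output): backprop_seq2col reverses the
 * window concatenation: each N-wide slice of a dY row is added back onto the row it
 * was copied from in the forward pass. Under the same zero-padding assumption as the
 * seq2col sketch above (backprop_seq2col_reference is an illustrative helper):
 *
 *     import numpy as np
 *
 *     def backprop_seq2col_reference(dY, nW):
 *         M = dY.shape[0]
 *         N = dY.shape[1] // (2 * nW + 1)
 *         dX = np.zeros((M, N), dtype=dY.dtype)
 *         for i in range(M):
 *             for w, j in enumerate(range(i - nW, i + nW + 1)):
 *                 if 0 <= j < M:
 *                     dX[j] += dY[i, w * N:(w + 1) * N]
 *         return dX
 *
 * This is the transpose of the forward sketch, so each gradient slice flows back to
 * exactly the row that supplied that window slot.
 */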
__pyx_v_nW = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_nW == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1060, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_seq2col", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1060, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_26backprop_seq2col(__pyx_self, __pyx_v_self, __pyx_v_dY, __pyx_v_nW); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_26backprop_seq2col(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_dY, int __pyx_v_nW) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__104) __Pyx_RefNannySetupContext("backprop_seq2col", 0); __Pyx_TraceCall("backprop_seq2col", __pyx_f[0], 1060, 0, __PYX_ERR(0, 1060, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1061 * * def backprop_seq2col(self, dY, int nW): * return _custom_kernels.backprop_seq2col(dY, nW) # <<<<<<<<<<<<<< * * def mean_pool(self, X, lengths): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1061, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_backprop_seq2col); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1061, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_nW); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1061, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; __pyx_t_5 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_5 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_dY, __pyx_t_2}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1061, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_dY, __pyx_t_2}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1061, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1061, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_v_dY); __Pyx_GIVEREF(__pyx_v_dY); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, 
__pyx_v_dY); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1061, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1060 * return _custom_kernels.seq2col(seq, nW) * * def backprop_seq2col(self, dY, int nW): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_seq2col(dY, nW) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1063 * return _custom_kernels.backprop_seq2col(dY, nW) * * def mean_pool(self, X, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.mean_pool(X, lengths) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_29mean_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_29mean_pool = {"mean_pool", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_29mean_pool, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_29mean_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_lengths = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mean_pool (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_lengths,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mean_pool", 1, 3, 3, 1); __PYX_ERR(0, 1063, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mean_pool", 1, 3, 3, 2); __PYX_ERR(0, 1063, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mean_pool") < 0)) __PYX_ERR(0, 1063, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = 
values[0]; __pyx_v_X = values[1]; __pyx_v_lengths = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("mean_pool", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1063, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.mean_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_28mean_pool(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_28mean_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_lengths) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__105) __Pyx_RefNannySetupContext("mean_pool", 0); __Pyx_TraceCall("mean_pool", __pyx_f[0], 1063, 0, __PYX_ERR(0, 1063, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1064 * * def mean_pool(self, X, lengths): * return _custom_kernels.mean_pool(X, lengths) # <<<<<<<<<<<<<< * * def backprop_mean_pool(self, d_means, lengths): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1064, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_mean_pool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1064, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; __pyx_t_4 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_4 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_X, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 2+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1064, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_X, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 2+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1064, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1064, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; } __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, __pyx_v_X); __Pyx_INCREF(__pyx_v_lengths); __Pyx_GIVEREF(__pyx_v_lengths); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, __pyx_v_lengths); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1064, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } 
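/* Reference sketch (editorial note, not Cython output): the pooling methods in this
 * region (mean_pool, max_pool, sum_pool and their backprops) appear to operate on a
 * ragged batch: X stacks several sequences row-wise and `lengths` gives the number
 * of rows in each sequence, so the pooled output has one row per sequence, and
 * max_pool must also report which row won per feature (the `which` argument that
 * backprop_max_pool consumes). The CuPy kernels are opaque here; a NumPy sketch of
 * the forward passes under that interpretation (the *_reference helpers and the
 * relative-index form of `which` are illustrative assumptions, not thinc's API):
 *
 *     import numpy as np
 *
 *     def mean_pool_reference(X, lengths):
 *         starts = np.cumsum(lengths) - lengths
 *         return np.stack([X[s:s + l].mean(axis=0) for s, l in zip(starts, lengths)])
 *
 *     def max_pool_reference(X, lengths):
 *         starts = np.cumsum(lengths) - lengths
 *         best = [X[s:s + l].max(axis=0) for s, l in zip(starts, lengths)]
 *         which = [X[s:s + l].argmax(axis=0) for s, l in zip(starts, lengths)]
 *         return np.stack(best), np.stack(which)
 *
 * In the backward direction, backprop_mean_pool spreads d_means[i] / lengths[i] over
 * the i-th sequence's rows, and backprop_max_pool routes d_maxes[i] to the rows
 * recorded in `which`.
 */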
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1063 * return _custom_kernels.backprop_seq2col(dY, nW) * * def mean_pool(self, X, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.mean_pool(X, lengths) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.mean_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1066 * return _custom_kernels.mean_pool(X, lengths) * * def backprop_mean_pool(self, d_means, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_mean_pool(d_means, lengths) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_31backprop_mean_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_31backprop_mean_pool = {"backprop_mean_pool", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_31backprop_mean_pool, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_31backprop_mean_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_d_means = 0; PyObject *__pyx_v_lengths = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_mean_pool (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_d_means,&__pyx_n_s_lengths,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d_means)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_mean_pool", 1, 3, 3, 1); __PYX_ERR(0, 1066, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_mean_pool", 1, 3, 3, 2); __PYX_ERR(0, 1066, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_mean_pool") < 0)) __PYX_ERR(0, 1066, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_d_means = values[1]; __pyx_v_lengths = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_mean_pool", 1, 3, 3, 
PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1066, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_mean_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_30backprop_mean_pool(__pyx_self, __pyx_v_self, __pyx_v_d_means, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_30backprop_mean_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_d_means, PyObject *__pyx_v_lengths) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__106) __Pyx_RefNannySetupContext("backprop_mean_pool", 0); __Pyx_TraceCall("backprop_mean_pool", __pyx_f[0], 1066, 0, __PYX_ERR(0, 1066, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1067 * * def backprop_mean_pool(self, d_means, lengths): * return _custom_kernels.backprop_mean_pool(d_means, lengths) # <<<<<<<<<<<<<< * * def max_pool(self, X, lengths): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1067, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_backprop_mean_pool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1067, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; __pyx_t_4 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_4 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_d_means, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 2+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1067, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_d_means, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 2+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1067, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1067, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; } __Pyx_INCREF(__pyx_v_d_means); __Pyx_GIVEREF(__pyx_v_d_means); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, __pyx_v_d_means); __Pyx_INCREF(__pyx_v_lengths); __Pyx_GIVEREF(__pyx_v_lengths); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, __pyx_v_lengths); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1067, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto 
__pyx_L0; /* "thinc/neural/ops.pyx":1066 * return _custom_kernels.mean_pool(X, lengths) * * def backprop_mean_pool(self, d_means, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_mean_pool(d_means, lengths) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_mean_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1069 * return _custom_kernels.backprop_mean_pool(d_means, lengths) * * def max_pool(self, X, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.max_pool(X, lengths) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_33max_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_33max_pool = {"max_pool", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_33max_pool, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_33max_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_lengths = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("max_pool (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_lengths,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("max_pool", 1, 3, 3, 1); __PYX_ERR(0, 1069, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("max_pool", 1, 3, 3, 2); __PYX_ERR(0, 1069, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "max_pool") < 0)) __PYX_ERR(0, 1069, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_X = values[1]; __pyx_v_lengths = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("max_pool", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1069, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.max_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); 
__Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_32max_pool(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_32max_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_lengths) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__107) __Pyx_RefNannySetupContext("max_pool", 0); __Pyx_TraceCall("max_pool", __pyx_f[0], 1069, 0, __PYX_ERR(0, 1069, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1070 * * def max_pool(self, X, lengths): * return _custom_kernels.max_pool(X, lengths) # <<<<<<<<<<<<<< * * def backprop_max_pool(self, d_maxes, which, lengths): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1070, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_max_pool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1070, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; __pyx_t_4 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_4 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_X, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 2+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1070, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_X, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 2+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1070, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1070, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; } __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, __pyx_v_X); __Pyx_INCREF(__pyx_v_lengths); __Pyx_GIVEREF(__pyx_v_lengths); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, __pyx_v_lengths); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1070, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1069 * return _custom_kernels.backprop_mean_pool(d_means, lengths) * * def max_pool(self, X, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.max_pool(X, lengths) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); 
__Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.max_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1072 * return _custom_kernels.max_pool(X, lengths) * * def backprop_max_pool(self, d_maxes, which, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_max_pool(d_maxes, which, lengths) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_35backprop_max_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_35backprop_max_pool = {"backprop_max_pool", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_35backprop_max_pool, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_35backprop_max_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_d_maxes = 0; PyObject *__pyx_v_which = 0; PyObject *__pyx_v_lengths = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_max_pool (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_d_maxes,&__pyx_n_s_which,&__pyx_n_s_lengths,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d_maxes)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_max_pool", 1, 4, 4, 1); __PYX_ERR(0, 1072, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_which)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_max_pool", 1, 4, 4, 2); __PYX_ERR(0, 1072, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_max_pool", 1, 4, 4, 3); __PYX_ERR(0, 1072, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_max_pool") < 0)) __PYX_ERR(0, 1072, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_self = values[0]; __pyx_v_d_maxes = values[1]; __pyx_v_which = values[2]; __pyx_v_lengths = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; 
__Pyx_RaiseArgtupleInvalid("backprop_max_pool", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1072, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_max_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_34backprop_max_pool(__pyx_self, __pyx_v_self, __pyx_v_d_maxes, __pyx_v_which, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_34backprop_max_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_d_maxes, PyObject *__pyx_v_which, PyObject *__pyx_v_lengths) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__108) __Pyx_RefNannySetupContext("backprop_max_pool", 0); __Pyx_TraceCall("backprop_max_pool", __pyx_f[0], 1072, 0, __PYX_ERR(0, 1072, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1073 * * def backprop_max_pool(self, d_maxes, which, lengths): * return _custom_kernels.backprop_max_pool(d_maxes, which, lengths) # <<<<<<<<<<<<<< * * def sum_pool(self, X, lengths): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1073, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_backprop_max_pool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1073, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; __pyx_t_4 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_4 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_v_d_maxes, __pyx_v_which, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 3+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1073, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_v_d_maxes, __pyx_v_which, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 3+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1073, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_5 = PyTuple_New(3+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1073, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; } __Pyx_INCREF(__pyx_v_d_maxes); __Pyx_GIVEREF(__pyx_v_d_maxes); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, __pyx_v_d_maxes); __Pyx_INCREF(__pyx_v_which); __Pyx_GIVEREF(__pyx_v_which); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, __pyx_v_which); __Pyx_INCREF(__pyx_v_lengths); __Pyx_GIVEREF(__pyx_v_lengths); PyTuple_SET_ITEM(__pyx_t_5, 2+__pyx_t_4, __pyx_v_lengths); __pyx_t_1 = 
__Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1073, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1072 * return _custom_kernels.max_pool(X, lengths) * * def backprop_max_pool(self, d_maxes, which, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_max_pool(d_maxes, which, lengths) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_max_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1075 * return _custom_kernels.backprop_max_pool(d_maxes, which, lengths) * * def sum_pool(self, X, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.sum_pool(X, lengths) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_37sum_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_37sum_pool = {"sum_pool", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_37sum_pool, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_37sum_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_lengths = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("sum_pool (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_X,&__pyx_n_s_lengths,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("sum_pool", 1, 3, 3, 1); __PYX_ERR(0, 1075, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("sum_pool", 1, 3, 3, 2); __PYX_ERR(0, 1075, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "sum_pool") < 0)) __PYX_ERR(0, 1075, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_X = values[1]; __pyx_v_lengths = values[2]; } goto __pyx_L4_argument_unpacking_done; 
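/* backprop_max_pool(d_maxes, which, lengths) above is likewise a direct
 * delegation to _custom_kernels.backprop_max_pool.  The `which` argument is
 * presumably the argmax bookkeeping produced by the forward max_pool kernel,
 * letting the backward kernel route each row of d_maxes back to the element
 * that won the max; that logic is an assumption about _custom_kernels and is
 * not visible in this file.  sum_pool / backprop_sum_pool below follow the
 * same shape with two arguments instead of three. */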
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("sum_pool", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1075, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.sum_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_36sum_pool(__pyx_self, __pyx_v_self, __pyx_v_X, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_36sum_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_X, PyObject *__pyx_v_lengths) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__109) __Pyx_RefNannySetupContext("sum_pool", 0); __Pyx_TraceCall("sum_pool", __pyx_f[0], 1075, 0, __PYX_ERR(0, 1075, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1076 * * def sum_pool(self, X, lengths): * return _custom_kernels.sum_pool(X, lengths) # <<<<<<<<<<<<<< * * def backprop_sum_pool(self, d_sums, lengths): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1076, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_sum_pool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1076, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; __pyx_t_4 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_4 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_X, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 2+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1076, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_X, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 2+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1076, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1076, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; } __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, __pyx_v_X); __Pyx_INCREF(__pyx_v_lengths); __Pyx_GIVEREF(__pyx_v_lengths); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, __pyx_v_lengths); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1076, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1075 * return 
_custom_kernels.backprop_max_pool(d_maxes, which, lengths) * * def sum_pool(self, X, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.sum_pool(X, lengths) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.sum_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1078 * return _custom_kernels.sum_pool(X, lengths) * * def backprop_sum_pool(self, d_sums, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_sum_pool(d_sums, lengths) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_39backprop_sum_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_39backprop_sum_pool = {"backprop_sum_pool", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_39backprop_sum_pool, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_39backprop_sum_pool(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_d_sums = 0; PyObject *__pyx_v_lengths = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_sum_pool (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_d_sums,&__pyx_n_s_lengths,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d_sums)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_sum_pool", 1, 3, 3, 1); __PYX_ERR(0, 1078, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lengths)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_sum_pool", 1, 3, 3, 2); __PYX_ERR(0, 1078, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_sum_pool") < 0)) __PYX_ERR(0, 1078, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_d_sums = values[1]; __pyx_v_lengths = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_sum_pool", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1078, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_sum_pool", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_38backprop_sum_pool(__pyx_self, __pyx_v_self, __pyx_v_d_sums, __pyx_v_lengths); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_38backprop_sum_pool(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_d_sums, PyObject *__pyx_v_lengths) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__110) __Pyx_RefNannySetupContext("backprop_sum_pool", 0); __Pyx_TraceCall("backprop_sum_pool", __pyx_f[0], 1078, 0, __PYX_ERR(0, 1078, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1079 * * def backprop_sum_pool(self, d_sums, lengths): * return _custom_kernels.backprop_sum_pool(d_sums, lengths) # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1079, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_backprop_sum_pool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1079, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; __pyx_t_4 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_4 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_d_sums, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 2+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1079, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_d_sums, __pyx_v_lengths}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 2+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1079, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_5 = PyTuple_New(2+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1079, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; } __Pyx_INCREF(__pyx_v_d_sums); __Pyx_GIVEREF(__pyx_v_d_sums); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, __pyx_v_d_sums); __Pyx_INCREF(__pyx_v_lengths); __Pyx_GIVEREF(__pyx_v_lengths); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, __pyx_v_lengths); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1079, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1078 * return _custom_kernels.sum_pool(X, lengths) * * def backprop_sum_pool(self, d_sums, lengths): # <<<<<<<<<<<<<< * return 
_custom_kernels.backprop_sum_pool(d_sums, lengths) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.backprop_sum_pool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1083 * @cython.boundscheck(False) * @cython.wraparound(False) * def hash(self, ids, uint64_t seed): # <<<<<<<<<<<<<< * return _custom_kernels.hash(ids, seed) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_41hash(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_41hash = {"hash", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_41hash, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_41hash(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_ids = 0; uint64_t __pyx_v_seed; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("hash (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_ids,&__pyx_n_s_seed,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_ids)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("hash", 1, 3, 3, 1); __PYX_ERR(0, 1083, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seed)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("hash", 1, 3, 3, 2); __PYX_ERR(0, 1083, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "hash") < 0)) __PYX_ERR(0, 1083, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_self = values[0]; __pyx_v_ids = values[1]; __pyx_v_seed = __Pyx_PyInt_As_uint64_t(values[2]); if (unlikely((__pyx_v_seed == ((uint64_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 1083, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("hash", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1083, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.hash", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
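/* hash(ids, seed) is the only method in this group with a typed argument: the
 * wrapper converts the Python `seed` object to a C uint64_t with
 * __Pyx_PyInt_As_uint64_t (treating a -1 result with a pending exception as
 * failure), and the implementation boxes it back with
 * __Pyx_PyInt_From_uint64_t before calling _custom_kernels.hash(ids, seed). */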
__pyx_pf_5thinc_6neural_3ops_7CupyOps_40hash(__pyx_self, __pyx_v_self, __pyx_v_ids, __pyx_v_seed); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_40hash(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, PyObject *__pyx_v_ids, uint64_t __pyx_v_seed) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__111) __Pyx_RefNannySetupContext("hash", 0); __Pyx_TraceCall("hash", __pyx_f[0], 1083, 0, __PYX_ERR(0, 1083, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1084 * @cython.wraparound(False) * def hash(self, ids, uint64_t seed): * return _custom_kernels.hash(ids, seed) # <<<<<<<<<<<<<< * * def scatter_add(self, out, ids, inputs): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_hash); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_uint64_t(__pyx_v_seed); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; __pyx_t_5 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_5 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_ids, __pyx_t_2}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1084, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_ids, __pyx_t_2}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1084, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_v_ids); __Pyx_GIVEREF(__pyx_v_ids); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, __pyx_v_ids); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1083 * @cython.boundscheck(False) * @cython.wraparound(False) * def hash(self, ids, uint64_t seed): # <<<<<<<<<<<<<< * return _custom_kernels.hash(ids, 
seed) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.hash", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1086 * return _custom_kernels.hash(ids, seed) * * def scatter_add(self, out, ids, inputs): # <<<<<<<<<<<<<< * self.xp.scatter_add(out, ids, inputs) * */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_43scatter_add(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_43scatter_add = {"scatter_add", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_43scatter_add, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_43scatter_add(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_out = 0; PyObject *__pyx_v_ids = 0; PyObject *__pyx_v_inputs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("scatter_add (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_out,&__pyx_n_s_ids,&__pyx_n_s_inputs,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_out)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("scatter_add", 1, 4, 4, 1); __PYX_ERR(0, 1086, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_ids)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("scatter_add", 1, 4, 4, 2); __PYX_ERR(0, 1086, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inputs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("scatter_add", 1, 4, 4, 3); __PYX_ERR(0, 1086, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "scatter_add") < 0)) __PYX_ERR(0, 1086, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_self = values[0]; __pyx_v_out = values[1]; __pyx_v_ids = values[2]; __pyx_v_inputs = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("scatter_add", 
1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1086, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.scatter_add", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_42scatter_add(__pyx_self, __pyx_v_self, __pyx_v_out, __pyx_v_ids, __pyx_v_inputs); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_42scatter_add(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_out, PyObject *__pyx_v_ids, PyObject *__pyx_v_inputs) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__112) __Pyx_RefNannySetupContext("scatter_add", 0); __Pyx_TraceCall("scatter_add", __pyx_f[0], 1086, 0, __PYX_ERR(0, 1086, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1087 * * def scatter_add(self, out, ids, inputs): * self.xp.scatter_add(out, ids, inputs) # <<<<<<<<<<<<<< * * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1087, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_scatter_add); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1087, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; __pyx_t_4 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_4 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_v_out, __pyx_v_ids, __pyx_v_inputs}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 3+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1087, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_v_out, __pyx_v_ids, __pyx_v_inputs}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_4, 3+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1087, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_5 = PyTuple_New(3+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1087, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; } __Pyx_INCREF(__pyx_v_out); __Pyx_GIVEREF(__pyx_v_out); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, __pyx_v_out); __Pyx_INCREF(__pyx_v_ids); __Pyx_GIVEREF(__pyx_v_ids); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, __pyx_v_ids); __Pyx_INCREF(__pyx_v_inputs); __Pyx_GIVEREF(__pyx_v_inputs); PyTuple_SET_ITEM(__pyx_t_5, 2+__pyx_t_4, __pyx_v_inputs); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1087, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } 
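/* scatter_add(out, ids, inputs) calls self.xp.scatter_add, i.e. the array
 * module's in-place scatter-add (cupy for this Ops class), which adds the rows
 * of `inputs` into `out` at the indices given by `ids`, accumulating where
 * indices repeat.  The call's return value is discarded and the method
 * returns None (__pyx_r is set to Py_None in the exit code below). */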
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":1086 * return _custom_kernels.hash(ids, seed) * * def scatter_add(self, out, ids, inputs): # <<<<<<<<<<<<<< * self.xp.scatter_add(out, ids, inputs) * */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.scatter_add", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1089 * self.xp.scatter_add(out, ids, inputs) * * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, # <<<<<<<<<<<<<< * learn_rate, mod_rate=1.): * cupy.ElementwiseKernel( */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_45adam(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_45adam = {"adam", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_45adam, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_45adam(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED PyObject *__pyx_v_self = 0; PyObject *__pyx_v_weights = 0; PyObject *__pyx_v_gradient = 0; PyObject *__pyx_v_mom1 = 0; PyObject *__pyx_v_mom2 = 0; PyObject *__pyx_v_beta1 = 0; PyObject *__pyx_v_beta2 = 0; PyObject *__pyx_v_eps = 0; PyObject *__pyx_v_learn_rate = 0; CYTHON_UNUSED PyObject *__pyx_v_mod_rate = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("adam (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_weights,&__pyx_n_s_gradient,&__pyx_n_s_mom1,&__pyx_n_s_mom2,&__pyx_n_s_beta1,&__pyx_n_s_beta2,&__pyx_n_s_eps,&__pyx_n_s_learn_rate,&__pyx_n_s_mod_rate,0}; PyObject* values[10] = {0,0,0,0,0,0,0,0,0,0}; values[9] = ((PyObject *)((PyObject*)__pyx_float_1_)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); CYTHON_FALLTHROUGH; case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); CYTHON_FALLTHROUGH; case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 1); __PYX_ERR(0, 1089, 
__pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 2); __PYX_ERR(0, 1089, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mom1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 3); __PYX_ERR(0, 1089, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mom2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 4); __PYX_ERR(0, 1089, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_beta1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 5); __PYX_ERR(0, 1089, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 6: if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_beta2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 6); __PYX_ERR(0, 1089, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 7: if (likely((values[7] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_eps)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 7); __PYX_ERR(0, 1089, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 8: if (likely((values[8] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_learn_rate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, 8); __PYX_ERR(0, 1089, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 9: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mod_rate); if (value) { values[9] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "adam") < 0)) __PYX_ERR(0, 1089, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); CYTHON_FALLTHROUGH; case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_weights = values[1]; __pyx_v_gradient = values[2]; __pyx_v_mom1 = values[3]; __pyx_v_mom2 = values[4]; __pyx_v_beta1 = values[5]; __pyx_v_beta2 = values[6]; __pyx_v_eps = values[7]; __pyx_v_learn_rate = values[8]; __pyx_v_mod_rate = values[9]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("adam", 0, 9, 10, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1089, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.adam", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_44adam(__pyx_self, __pyx_v_self, __pyx_v_weights, __pyx_v_gradient, __pyx_v_mom1, __pyx_v_mom2, __pyx_v_beta1, __pyx_v_beta2, __pyx_v_eps, __pyx_v_learn_rate, __pyx_v_mod_rate); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_44adam(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject 
*__pyx_v_self, PyObject *__pyx_v_weights, PyObject *__pyx_v_gradient, PyObject *__pyx_v_mom1, PyObject *__pyx_v_mom2, PyObject *__pyx_v_beta1, PyObject *__pyx_v_beta2, PyObject *__pyx_v_eps, PyObject *__pyx_v_learn_rate, CYTHON_UNUSED PyObject *__pyx_v_mod_rate) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__113) __Pyx_RefNannySetupContext("adam", 0); __Pyx_TraceCall("adam", __pyx_f[0], 1089, 0, __PYX_ERR(0, 1089, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1091 * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, * learn_rate, mod_rate=1.): * cupy.ElementwiseKernel( # <<<<<<<<<<<<<< * 'T grad, T lr, T one_minus_beta1, T one_minus_beta2, T eps', * 'T param, T m, T v', */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_cupy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_ElementwiseKernel); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__114, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":1097 * v += one_minus_beta2 * (grad * grad - v); * param -= lr * m / (sqrt(v) + eps);''', * 'adam')(gradient, learn_rate, 1 - beta1, 1 - beta2, # <<<<<<<<<<<<<< * eps, weights, mom1, mom2) * gradient.fill(0) */ __pyx_t_3 = __Pyx_PyInt_SubtractCObj(__pyx_int_1, __pyx_v_beta1, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_SubtractCObj(__pyx_int_1, __pyx_v_beta2, 1, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); /* "thinc/neural/ops.pyx":1098 * param -= lr * m / (sqrt(v) + eps);''', * 'adam')(gradient, learn_rate, 1 - beta1, 1 - beta2, * eps, weights, mom1, mom2) # <<<<<<<<<<<<<< * gradient.fill(0) * */ __pyx_t_5 = NULL; __pyx_t_6 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_6 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[9] = {__pyx_t_5, __pyx_v_gradient, __pyx_v_learn_rate, __pyx_t_3, __pyx_t_4, __pyx_v_eps, __pyx_v_weights, __pyx_v_mom1, __pyx_v_mom2}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_6, 8+__pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1097, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[9] = {__pyx_t_5, __pyx_v_gradient, __pyx_v_learn_rate, __pyx_t_3, __pyx_t_4, __pyx_v_eps, __pyx_v_weights, __pyx_v_mom1, __pyx_v_mom2}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_6, 8+__pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1097, __pyx_L1_error) 
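/* adam() builds and launches a cupy.ElementwiseKernel whose body (quoted in
 * the ops.pyx snippet above) performs the Adam update per element:
 *     m     += (1 - beta1) * (grad - m);
 *     v     += (1 - beta2) * (grad * grad - v);
 *     param -= lr * m / (sqrt(v) + eps);
 * i.e. exponential moving averages of the gradient and squared gradient,
 * followed by the scaled step (no bias-correction term appears in this
 * kernel).  The (1 - beta1) and (1 - beta2) scalars are computed here with
 * __Pyx_PyInt_SubtractCObj, the kernel is invoked with (gradient, learn_rate,
 * 1 - beta1, 1 - beta2, eps, weights, mom1, mom2), and gradient.fill(0)
 * afterwards resets the gradient buffer.  mod_rate is accepted for API
 * compatibility but unused (CYTHON_UNUSED). */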
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_7 = PyTuple_New(8+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_INCREF(__pyx_v_gradient); __Pyx_GIVEREF(__pyx_v_gradient); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_v_gradient); __Pyx_INCREF(__pyx_v_learn_rate); __Pyx_GIVEREF(__pyx_v_learn_rate); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_v_learn_rate); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 2+__pyx_t_6, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 3+__pyx_t_6, __pyx_t_4); __Pyx_INCREF(__pyx_v_eps); __Pyx_GIVEREF(__pyx_v_eps); PyTuple_SET_ITEM(__pyx_t_7, 4+__pyx_t_6, __pyx_v_eps); __Pyx_INCREF(__pyx_v_weights); __Pyx_GIVEREF(__pyx_v_weights); PyTuple_SET_ITEM(__pyx_t_7, 5+__pyx_t_6, __pyx_v_weights); __Pyx_INCREF(__pyx_v_mom1); __Pyx_GIVEREF(__pyx_v_mom1); PyTuple_SET_ITEM(__pyx_t_7, 6+__pyx_t_6, __pyx_v_mom1); __Pyx_INCREF(__pyx_v_mom2); __Pyx_GIVEREF(__pyx_v_mom2); PyTuple_SET_ITEM(__pyx_t_7, 7+__pyx_t_6, __pyx_v_mom2); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":1099 * 'adam')(gradient, learn_rate, 1 - beta1, 1 - beta2, * eps, weights, mom1, mom2) * gradient.fill(0) # <<<<<<<<<<<<<< * * def normal_init(self, W, fan_in, inplace=True): */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_gradient, __pyx_n_s_fill); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1099, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_7, __pyx_int_0) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_int_0); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1099, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":1089 * self.xp.scatter_add(out, ids, inputs) * * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, # <<<<<<<<<<<<<< * learn_rate, mod_rate=1.): * cupy.ElementwiseKernel( */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.adam", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1101 * gradient.fill(0) * * def normal_init(self, W, fan_in, inplace=True): # <<<<<<<<<<<<<< * scale = self.xp.sqrt(1. 
/ fan_in) * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_47normal_init(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_47normal_init = {"normal_init", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_47normal_init, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_47normal_init(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_W = 0; PyObject *__pyx_v_fan_in = 0; PyObject *__pyx_v_inplace = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("normal_init (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_W,&__pyx_n_s_fan_in,&__pyx_n_s_inplace,0}; PyObject* values[4] = {0,0,0,0}; values[3] = ((PyObject *)((PyObject *)Py_True)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_W)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("normal_init", 0, 3, 4, 1); __PYX_ERR(0, 1101, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_fan_in)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("normal_init", 0, 3, 4, 2); __PYX_ERR(0, 1101, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inplace); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "normal_init") < 0)) __PYX_ERR(0, 1101, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_self = values[0]; __pyx_v_W = values[1]; __pyx_v_fan_in = values[2]; __pyx_v_inplace = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("normal_init", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1101, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.normal_init", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_46normal_init(__pyx_self, __pyx_v_self, __pyx_v_W, __pyx_v_fan_in, __pyx_v_inplace); /* function exit code */ 
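/* normal_init(W, fan_in, inplace=True), per the embedded source: compute
 * scale = xp.sqrt(1. / fan_in), sample xp.random.normal(scale=scale,
 * size=int(prod(W.shape))), reshape the sample to W.shape, then either copy
 * it into W with copy_array() and return W (inplace=True is the default;
 * values[3] is pre-seeded with Py_True above) or return the freshly drawn
 * array.  This is the usual 1/sqrt(fan_in) scaling for normally distributed
 * weight initialisation. */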
__Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_46normal_init(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_W, PyObject *__pyx_v_fan_in, PyObject *__pyx_v_inplace) { PyObject *__pyx_v_scale = NULL; PyObject *__pyx_v_inits = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__115) __Pyx_RefNannySetupContext("normal_init", 0); __Pyx_TraceCall("normal_init", __pyx_f[0], 1101, 0, __PYX_ERR(0, 1101, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1102 * * def normal_init(self, W, fan_in, inplace=True): * scale = self.xp.sqrt(1. / fan_in) # <<<<<<<<<<<<<< * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) * inits = inits.reshape(W.shape) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1102, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1102, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyFloat_DivideCObj(__pyx_float_1_, __pyx_v_fan_in, 1., 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1102, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1102, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_scale = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":1103 * def normal_init(self, W, fan_in, inplace=True): * scale = self.xp.sqrt(1. 
/ fan_in) * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) # <<<<<<<<<<<<<< * inits = inits.reshape(W.shape) * if inplace: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_xp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_random); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_normal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_scale, __pyx_v_scale) < 0) __PYX_ERR(0, 1103, __pyx_L1_error) __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_prod); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_W, __pyx_n_s_shape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_6, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyNumber_Int(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_size, __pyx_t_4) < 0) __PYX_ERR(0, 1103, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_inits = __pyx_t_4; __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":1104 * scale = self.xp.sqrt(1. / fan_in) * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) * inits = inits.reshape(W.shape) # <<<<<<<<<<<<<< * if inplace: * copy_array(W, inits) */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_inits, __pyx_n_s_reshape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1104, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_W, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1104, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_4 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1104, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_inits, __pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":1105 * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) * inits = inits.reshape(W.shape) * if inplace: # <<<<<<<<<<<<<< * copy_array(W, inits) * return W */ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_v_inplace); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 1105, __pyx_L1_error) if (__pyx_t_7) { /* "thinc/neural/ops.pyx":1106 * inits = inits.reshape(W.shape) * if inplace: * copy_array(W, inits) # <<<<<<<<<<<<<< * return W * else: */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_copy_array); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1106, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_1, __pyx_v_W, __pyx_v_inits}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1106, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_1, __pyx_v_W, __pyx_v_inits}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1106, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif { __pyx_t_2 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1106, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (__pyx_t_1) { __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = NULL; } __Pyx_INCREF(__pyx_v_W); __Pyx_GIVEREF(__pyx_v_W); PyTuple_SET_ITEM(__pyx_t_2, 0+__pyx_t_8, __pyx_v_W); __Pyx_INCREF(__pyx_v_inits); __Pyx_GIVEREF(__pyx_v_inits); PyTuple_SET_ITEM(__pyx_t_2, 1+__pyx_t_8, __pyx_v_inits); __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1106, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":1107 * if inplace: * copy_array(W, inits) * return W # <<<<<<<<<<<<<< * else: * return inits */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_W); __pyx_r = __pyx_v_W; goto __pyx_L0; /* "thinc/neural/ops.pyx":1105 * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) * inits = inits.reshape(W.shape) * if inplace: # <<<<<<<<<<<<<< * copy_array(W, inits) * return W */ } /* "thinc/neural/ops.pyx":1109 * return W * else: * return inits # <<<<<<<<<<<<<< * * def position_encode(self, *args, **kwargs): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_inits); __pyx_r = __pyx_v_inits; goto __pyx_L0; } /* "thinc/neural/ops.pyx":1101 * gradient.fill(0) * * def normal_init(self, W, fan_in, inplace=True): # <<<<<<<<<<<<<< * scale = 
self.xp.sqrt(1. / fan_in) * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.normal_init", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_scale); __Pyx_XDECREF(__pyx_v_inits); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1111 * return inits * * def position_encode(self, *args, **kwargs): # <<<<<<<<<<<<<< * positions = NumpyOps().position_encode(*args, **kwargs) * return self.asarray(positions) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_49position_encode(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_7CupyOps_49position_encode = {"position_encode", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_7CupyOps_49position_encode, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_7CupyOps_49position_encode(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_self = 0; PyObject *__pyx_v_args = 0; PyObject *__pyx_v_kwargs = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("position_encode (wrapper)", 0); __pyx_v_kwargs = PyDict_New(); if (unlikely(!__pyx_v_kwargs)) return NULL; __Pyx_GOTREF(__pyx_v_kwargs); if (PyTuple_GET_SIZE(__pyx_args) > 1) { __pyx_v_args = PyTuple_GetSlice(__pyx_args, 1, PyTuple_GET_SIZE(__pyx_args)); if (unlikely(!__pyx_v_args)) { __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_RefNannyFinishContext(); return NULL; } __Pyx_GOTREF(__pyx_v_args); } else { __pyx_v_args = __pyx_empty_tuple; __Pyx_INCREF(__pyx_empty_tuple); } { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { default: case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { const Py_ssize_t used_pos_args = (pos_args < 1) ? 
pos_args : 1; if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, used_pos_args, "position_encode") < 0)) __PYX_ERR(0, 1111, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) < 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_self = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("position_encode", 0, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1111, __pyx_L3_error) __pyx_L3_error:; __Pyx_DECREF(__pyx_v_args); __pyx_v_args = 0; __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_AddTraceback("thinc.neural.ops.CupyOps.position_encode", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_7CupyOps_48position_encode(__pyx_self, __pyx_v_self, __pyx_v_args, __pyx_v_kwargs); /* function exit code */ __Pyx_XDECREF(__pyx_v_args); __Pyx_XDECREF(__pyx_v_kwargs); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_7CupyOps_48position_encode(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs) { PyObject *__pyx_v_positions = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__116) __Pyx_RefNannySetupContext("position_encode", 0); __Pyx_TraceCall("position_encode", __pyx_f[0], 1111, 0, __PYX_ERR(0, 1111, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1112 * * def position_encode(self, *args, **kwargs): * positions = NumpyOps().position_encode(*args, **kwargs) # <<<<<<<<<<<<<< * return self.asarray(positions) * */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_NumpyOps); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_position_encode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_v_args, __pyx_v_kwargs); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_positions = __pyx_t_1; __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":1113 * def position_encode(self, *args, **kwargs): * positions = NumpyOps().position_encode(*args, **kwargs) * return self.asarray(positions) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_positions) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_positions); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thinc/neural/ops.pyx":1111 * return inits * * def position_encode(self, *args, **kwargs): # <<<<<<<<<<<<<< * positions = NumpyOps().position_encode(*args, **kwargs) * return self.asarray(positions) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("thinc.neural.ops.CupyOps.position_encode", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_positions); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1116 * * * cdef void seq2col(float* output, const float* X, int nW, int B, int I) nogil: # <<<<<<<<<<<<<< * ''' * Let's say nW is 1 (it usually is). 
Then we want to take: */ static void __pyx_f_5thinc_6neural_3ops_seq2col(float *__pyx_v_output, float const *__pyx_v_X, int __pyx_v_nW, int __pyx_v_B, int __pyx_v_I) { long __pyx_v_nF; int __pyx_v_i; long __pyx_v_o_start; long __pyx_v_x_start; long __pyx_v_x_end; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("seq2col", __pyx_f[0], 1116, 1, __PYX_ERR(0, 1116, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1149 * * ''' * nF = nW * 2 + 1 # <<<<<<<<<<<<<< * for i in range(B): * o_start = i * I * nF */ __pyx_v_nF = ((__pyx_v_nW * 2) + 1); /* "thinc/neural/ops.pyx":1150 * ''' * nF = nW * 2 + 1 * for i in range(B): # <<<<<<<<<<<<<< * o_start = i * I * nF * x_start = (i-nW) * I */ __pyx_t_1 = __pyx_v_B; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "thinc/neural/ops.pyx":1151 * nF = nW * 2 + 1 * for i in range(B): * o_start = i * I * nF # <<<<<<<<<<<<<< * x_start = (i-nW) * I * x_end = (i+nW+1) * I */ __pyx_v_o_start = ((__pyx_v_i * __pyx_v_I) * __pyx_v_nF); /* "thinc/neural/ops.pyx":1152 * for i in range(B): * o_start = i * I * nF * x_start = (i-nW) * I # <<<<<<<<<<<<<< * x_end = (i+nW+1) * I * if x_start < 0: */ __pyx_v_x_start = ((__pyx_v_i - __pyx_v_nW) * __pyx_v_I); /* "thinc/neural/ops.pyx":1153 * o_start = i * I * nF * x_start = (i-nW) * I * x_end = (i+nW+1) * I # <<<<<<<<<<<<<< * if x_start < 0: * o_start += -x_start */ __pyx_v_x_end = (((__pyx_v_i + __pyx_v_nW) + 1) * __pyx_v_I); /* "thinc/neural/ops.pyx":1154 * x_start = (i-nW) * I * x_end = (i+nW+1) * I * if x_start < 0: # <<<<<<<<<<<<<< * o_start += -x_start * x_start = 0 */ __pyx_t_4 = ((__pyx_v_x_start < 0) != 0); if (__pyx_t_4) { /* "thinc/neural/ops.pyx":1155 * x_end = (i+nW+1) * I * if x_start < 0: * o_start += -x_start # <<<<<<<<<<<<<< * x_start = 0 * if x_end >= B * I: */ __pyx_v_o_start = (__pyx_v_o_start + (-__pyx_v_x_start)); /* "thinc/neural/ops.pyx":1156 * if x_start < 0: * o_start += -x_start * x_start = 0 # <<<<<<<<<<<<<< * if x_end >= B * I: * x_end = B * I */ __pyx_v_x_start = 0; /* "thinc/neural/ops.pyx":1154 * x_start = (i-nW) * I * x_end = (i+nW+1) * I * if x_start < 0: # <<<<<<<<<<<<<< * o_start += -x_start * x_start = 0 */ } /* "thinc/neural/ops.pyx":1157 * o_start += -x_start * x_start = 0 * if x_end >= B * I: # <<<<<<<<<<<<<< * x_end = B * I * memcpy(&output[o_start], */ __pyx_t_4 = ((__pyx_v_x_end >= (__pyx_v_B * __pyx_v_I)) != 0); if (__pyx_t_4) { /* "thinc/neural/ops.pyx":1158 * x_start = 0 * if x_end >= B * I: * x_end = B * I # <<<<<<<<<<<<<< * memcpy(&output[o_start], * &X[x_start], (x_end-x_start) * sizeof(output[0])) */ __pyx_v_x_end = (__pyx_v_B * __pyx_v_I); /* "thinc/neural/ops.pyx":1157 * o_start += -x_start * x_start = 0 * if x_end >= B * I: # <<<<<<<<<<<<<< * x_end = B * I * memcpy(&output[o_start], */ } /* "thinc/neural/ops.pyx":1159 * if x_end >= B * I: * x_end = B * I * memcpy(&output[o_start], # <<<<<<<<<<<<<< * &X[x_start], (x_end-x_start) * sizeof(output[0])) * */ (void)(memcpy((&(__pyx_v_output[__pyx_v_o_start])), (&(__pyx_v_X[__pyx_v_x_start])), ((__pyx_v_x_end - __pyx_v_x_start) * (sizeof((__pyx_v_output[0])))))); } /* "thinc/neural/ops.pyx":1116 * * * cdef void seq2col(float* output, const float* X, int nW, int B, int I) nogil: # <<<<<<<<<<<<<< * ''' * Let's say nW is 1 (it usually is). 
Then we want to take: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":1163 * * * cdef void backprop_seq2col(float* d_seqs, # <<<<<<<<<<<<<< * const float* d_cols, int B, int I, int nW) nogil: * # Here's what we're doing, if we had 2d indexing. */ static void __pyx_f_5thinc_6neural_3ops_backprop_seq2col(float *__pyx_v_d_seqs, float const *__pyx_v_d_cols, int __pyx_v_B, int __pyx_v_I, int __pyx_v_nW) { int __pyx_v_col_feat; long __pyx_v_nF; int __pyx_v_i; int __pyx_v_seq_row; long __pyx_v_f; long __pyx_v_col_row; long __pyx_v_j; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; long __pyx_t_4; long __pyx_t_5; long __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("backprop_seq2col", __pyx_f[0], 1163, 1, __PYX_ERR(0, 1163, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1173 * # d_seq[i] += d_cols[i+2, 0] * cdef int col_feat * nF = nW * 2 + 1 # <<<<<<<<<<<<<< * for i in range(B): * seq_row = i * I */ __pyx_v_nF = ((__pyx_v_nW * 2) + 1); /* "thinc/neural/ops.pyx":1174 * cdef int col_feat * nF = nW * 2 + 1 * for i in range(B): # <<<<<<<<<<<<<< * seq_row = i * I * col_feat = nF * I */ __pyx_t_1 = __pyx_v_B; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "thinc/neural/ops.pyx":1175 * nF = nW * 2 + 1 * for i in range(B): * seq_row = i * I # <<<<<<<<<<<<<< * col_feat = nF * I * for f in range(-nW, nW+1): */ __pyx_v_seq_row = (__pyx_v_i * __pyx_v_I); /* "thinc/neural/ops.pyx":1176 * for i in range(B): * seq_row = i * I * col_feat = nF * I # <<<<<<<<<<<<<< * for f in range(-nW, nW+1): * col_row = (i+f) * (I * nF) */ __pyx_v_col_feat = (__pyx_v_nF * __pyx_v_I); /* "thinc/neural/ops.pyx":1177 * seq_row = i * I * col_feat = nF * I * for f in range(-nW, nW+1): # <<<<<<<<<<<<<< * col_row = (i+f) * (I * nF) * col_feat -= I */ __pyx_t_4 = (__pyx_v_nW + 1); __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = (-__pyx_v_nW); __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_f = __pyx_t_6; /* "thinc/neural/ops.pyx":1178 * col_feat = nF * I * for f in range(-nW, nW+1): * col_row = (i+f) * (I * nF) # <<<<<<<<<<<<<< * col_feat -= I * if col_row >= 0 and (col_row < (B*I*nF)): */ __pyx_v_col_row = ((__pyx_v_i + __pyx_v_f) * (__pyx_v_I * __pyx_v_nF)); /* "thinc/neural/ops.pyx":1179 * for f in range(-nW, nW+1): * col_row = (i+f) * (I * nF) * col_feat -= I # <<<<<<<<<<<<<< * if col_row >= 0 and (col_row < (B*I*nF)): * j = col_row + col_feat */ __pyx_v_col_feat = (__pyx_v_col_feat - __pyx_v_I); /* "thinc/neural/ops.pyx":1180 * col_row = (i+f) * (I * nF) * col_feat -= I * if col_row >= 0 and (col_row < (B*I*nF)): # <<<<<<<<<<<<<< * j = col_row + col_feat * if j >= 0 and (j+I) < (B*I*nF): */ __pyx_t_8 = ((__pyx_v_col_row >= 0) != 0); if (__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L8_bool_binop_done; } __pyx_t_8 = ((__pyx_v_col_row < ((__pyx_v_B * __pyx_v_I) * __pyx_v_nF)) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L8_bool_binop_done:; if (__pyx_t_7) { /* "thinc/neural/ops.pyx":1181 * col_feat -= I * if col_row >= 0 and (col_row < (B*I*nF)): * j = col_row + col_feat # <<<<<<<<<<<<<< * if j >= 0 and (j+I) < (B*I*nF): * VecVec.add_i(&d_seqs[seq_row], */ __pyx_v_j = (__pyx_v_col_row + __pyx_v_col_feat); /* "thinc/neural/ops.pyx":1182 * if col_row >= 0 and (col_row < (B*I*nF)): * j = col_row + 
col_feat * if j >= 0 and (j+I) < (B*I*nF): # <<<<<<<<<<<<<< * VecVec.add_i(&d_seqs[seq_row], * &d_cols[j], 1., I) */ __pyx_t_8 = ((__pyx_v_j >= 0) != 0); if (__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L11_bool_binop_done; } __pyx_t_8 = (((__pyx_v_j + __pyx_v_I) < ((__pyx_v_B * __pyx_v_I) * __pyx_v_nF)) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L11_bool_binop_done:; if (__pyx_t_7) { /* "thinc/neural/ops.pyx":1183 * j = col_row + col_feat * if j >= 0 and (j+I) < (B*I*nF): * VecVec.add_i(&d_seqs[seq_row], # <<<<<<<<<<<<<< * &d_cols[j], 1., I) * */ __pyx_f_5thinc_6linalg_6VecVec_add_i((&(__pyx_v_d_seqs[__pyx_v_seq_row])), (&(__pyx_v_d_cols[__pyx_v_j])), 1., __pyx_v_I); /* "thinc/neural/ops.pyx":1182 * if col_row >= 0 and (col_row < (B*I*nF)): * j = col_row + col_feat * if j >= 0 and (j+I) < (B*I*nF): # <<<<<<<<<<<<<< * VecVec.add_i(&d_seqs[seq_row], * &d_cols[j], 1., I) */ } /* "thinc/neural/ops.pyx":1180 * col_row = (i+f) * (I * nF) * col_feat -= I * if col_row >= 0 and (col_row < (B*I*nF)): # <<<<<<<<<<<<<< * j = col_row + col_feat * if j >= 0 and (j+I) < (B*I*nF): */ } } } /* "thinc/neural/ops.pyx":1163 * * * cdef void backprop_seq2col(float* d_seqs, # <<<<<<<<<<<<<< * const float* d_cols, int B, int I, int nW) nogil: * # Here's what we're doing, if we had 2d indexing. */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.backprop_seq2col", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":1187 * * * cdef void cpu_maxout(float* best__bo, int* which__bo, # <<<<<<<<<<<<<< * const float* cands__bop, int B, int O, int P) nogil: * for i in range(B*O): */ static void __pyx_f_5thinc_6neural_3ops_cpu_maxout(float *__pyx_v_best__bo, int *__pyx_v_which__bo, float const *__pyx_v_cands__bop, int __pyx_v_B, int __pyx_v_O, int __pyx_v_P) { int __pyx_v_i; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_maxout", __pyx_f[0], 1187, 1, __PYX_ERR(0, 1187, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1189 * cdef void cpu_maxout(float* best__bo, int* which__bo, * const float* cands__bop, int B, int O, int P) nogil: * for i in range(B*O): # <<<<<<<<<<<<<< * which__bo[i] = Vec.arg_max(&cands__bop[i*P], P) * best__bo[i] = cands__bop[i*P + which__bo[i]] */ __pyx_t_1 = (__pyx_v_B * __pyx_v_O); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "thinc/neural/ops.pyx":1190 * const float* cands__bop, int B, int O, int P) nogil: * for i in range(B*O): * which__bo[i] = Vec.arg_max(&cands__bop[i*P], P) # <<<<<<<<<<<<<< * best__bo[i] = cands__bop[i*P + which__bo[i]] * */ (__pyx_v_which__bo[__pyx_v_i]) = __pyx_f_5thinc_6linalg_3Vec_arg_max((&(__pyx_v_cands__bop[(__pyx_v_i * __pyx_v_P)])), __pyx_v_P); /* "thinc/neural/ops.pyx":1191 * for i in range(B*O): * which__bo[i] = Vec.arg_max(&cands__bop[i*P], P) * best__bo[i] = cands__bop[i*P + which__bo[i]] # <<<<<<<<<<<<<< * * */ (__pyx_v_best__bo[__pyx_v_i]) = (__pyx_v_cands__bop[((__pyx_v_i * __pyx_v_P) + (__pyx_v_which__bo[__pyx_v_i]))]); } /* "thinc/neural/ops.pyx":1187 * * * cdef void cpu_maxout(float* best__bo, int* which__bo, # <<<<<<<<<<<<<< * const float* cands__bop, int B, int O, int P) nogil: * for i in range(B*O): */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_maxout", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 
1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":1194 * * * cdef void cpu_backprop_maxout(float* dX__bop, # <<<<<<<<<<<<<< * const float* dX__bo, const int* which__bo, int B, int O, int P) nogil: * for b in range(B): */ static void __pyx_f_5thinc_6neural_3ops_cpu_backprop_maxout(float *__pyx_v_dX__bop, float const *__pyx_v_dX__bo, int const *__pyx_v_which__bo, int __pyx_v_B, int __pyx_v_O, int __pyx_v_P) { CYTHON_UNUSED int __pyx_v_b; CYTHON_UNUSED int __pyx_v_o; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_backprop_maxout", __pyx_f[0], 1194, 1, __PYX_ERR(0, 1194, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1196 * cdef void cpu_backprop_maxout(float* dX__bop, * const float* dX__bo, const int* which__bo, int B, int O, int P) nogil: * for b in range(B): # <<<<<<<<<<<<<< * for o in range(O): * dX__bop[which__bo[0]] = dX__bo[0] */ __pyx_t_1 = __pyx_v_B; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_b = __pyx_t_3; /* "thinc/neural/ops.pyx":1197 * const float* dX__bo, const int* which__bo, int B, int O, int P) nogil: * for b in range(B): * for o in range(O): # <<<<<<<<<<<<<< * dX__bop[which__bo[0]] = dX__bo[0] * dX__bop += P */ __pyx_t_4 = __pyx_v_O; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_o = __pyx_t_6; /* "thinc/neural/ops.pyx":1198 * for b in range(B): * for o in range(O): * dX__bop[which__bo[0]] = dX__bo[0] # <<<<<<<<<<<<<< * dX__bop += P * dX__bo += 1 */ (__pyx_v_dX__bop[(__pyx_v_which__bo[0])]) = (__pyx_v_dX__bo[0]); /* "thinc/neural/ops.pyx":1199 * for o in range(O): * dX__bop[which__bo[0]] = dX__bo[0] * dX__bop += P # <<<<<<<<<<<<<< * dX__bo += 1 * which__bo += 1 */ __pyx_v_dX__bop = (__pyx_v_dX__bop + __pyx_v_P); /* "thinc/neural/ops.pyx":1200 * dX__bop[which__bo[0]] = dX__bo[0] * dX__bop += P * dX__bo += 1 # <<<<<<<<<<<<<< * which__bo += 1 * */ __pyx_v_dX__bo = (__pyx_v_dX__bo + 1); /* "thinc/neural/ops.pyx":1201 * dX__bop += P * dX__bo += 1 * which__bo += 1 # <<<<<<<<<<<<<< * * */ __pyx_v_which__bo = (__pyx_v_which__bo + 1); } } /* "thinc/neural/ops.pyx":1194 * * * cdef void cpu_backprop_maxout(float* dX__bop, # <<<<<<<<<<<<<< * const float* dX__bo, const int* which__bo, int B, int O, int P) nogil: * for b in range(B): */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_backprop_maxout", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":1204 * * * def cpu_clip_gradient(weight_t[::1] gradient, weight_t threshold): # <<<<<<<<<<<<<< * grad_norm = Vec.norm(&gradient[0], gradient.shape[0]) * if grad_norm >= threshold: */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_1cpu_clip_gradient(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_1cpu_clip_gradient = {"cpu_clip_gradient", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_1cpu_clip_gradient, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_1cpu_clip_gradient(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_gradient = { 0, 0, { 0 }, { 0 }, { 0 } }; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_threshold; int __pyx_lineno = 0; const char 
*__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cpu_clip_gradient (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_gradient,&__pyx_n_s_threshold,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("cpu_clip_gradient", 1, 2, 2, 1); __PYX_ERR(0, 1204, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "cpu_clip_gradient") < 0)) __PYX_ERR(0, 1204, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_gradient = __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_5thinc_8typedefs_weight_t(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_gradient.memview)) __PYX_ERR(0, 1204, __pyx_L3_error) __pyx_v_threshold = __pyx_PyFloat_AsFloat(values[1]); if (unlikely((__pyx_v_threshold == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 1204, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cpu_clip_gradient", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1204, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.cpu_clip_gradient", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_cpu_clip_gradient(__pyx_self, __pyx_v_gradient, __pyx_v_threshold); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_cpu_clip_gradient(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_gradient, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_threshold) { __pyx_t_5thinc_8typedefs_weight_t __pyx_v_grad_norm; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__117) __Pyx_RefNannySetupContext("cpu_clip_gradient", 0); __Pyx_TraceCall("cpu_clip_gradient", __pyx_f[0], 1204, 0, __PYX_ERR(0, 1204, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1205 * * def cpu_clip_gradient(weight_t[::1] gradient, weight_t threshold): * grad_norm = Vec.norm(&gradient[0], gradient.shape[0]) # <<<<<<<<<<<<<< * if grad_norm >= threshold: * Vec.mul_i(&gradient[0], threshold / grad_norm, gradient.shape[0]) */ __pyx_t_1 = 0; __pyx_t_2 = -1; if (__pyx_t_1 < 0) { __pyx_t_1 += __pyx_v_gradient.shape[0]; if (unlikely(__pyx_t_1 < 0)) __pyx_t_2 = 0; } else if (unlikely(__pyx_t_1 >= __pyx_v_gradient.shape[0])) __pyx_t_2 = 0; if (unlikely(__pyx_t_2 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_2); __PYX_ERR(0, 1205, 
__pyx_L1_error) } __pyx_v_grad_norm = __pyx_f_5thinc_6linalg_3Vec_norm((&(*((__pyx_t_5thinc_8typedefs_weight_t *) ( /* dim=0 */ ((char *) (((__pyx_t_5thinc_8typedefs_weight_t *) __pyx_v_gradient.data) + __pyx_t_1)) )))), (__pyx_v_gradient.shape[0])); /* "thinc/neural/ops.pyx":1206 * def cpu_clip_gradient(weight_t[::1] gradient, weight_t threshold): * grad_norm = Vec.norm(&gradient[0], gradient.shape[0]) * if grad_norm >= threshold: # <<<<<<<<<<<<<< * Vec.mul_i(&gradient[0], threshold / grad_norm, gradient.shape[0]) * */ __pyx_t_3 = ((__pyx_v_grad_norm >= __pyx_v_threshold) != 0); if (__pyx_t_3) { /* "thinc/neural/ops.pyx":1207 * grad_norm = Vec.norm(&gradient[0], gradient.shape[0]) * if grad_norm >= threshold: * Vec.mul_i(&gradient[0], threshold / grad_norm, gradient.shape[0]) # <<<<<<<<<<<<<< * * */ __pyx_t_1 = 0; __pyx_t_2 = -1; if (__pyx_t_1 < 0) { __pyx_t_1 += __pyx_v_gradient.shape[0]; if (unlikely(__pyx_t_1 < 0)) __pyx_t_2 = 0; } else if (unlikely(__pyx_t_1 >= __pyx_v_gradient.shape[0])) __pyx_t_2 = 0; if (unlikely(__pyx_t_2 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_2); __PYX_ERR(0, 1207, __pyx_L1_error) } __pyx_f_5thinc_6linalg_3Vec_mul_i((&(*((__pyx_t_5thinc_8typedefs_weight_t *) ( /* dim=0 */ ((char *) (((__pyx_t_5thinc_8typedefs_weight_t *) __pyx_v_gradient.data) + __pyx_t_1)) )))), (__pyx_v_threshold / __pyx_v_grad_norm), (__pyx_v_gradient.shape[0])); /* "thinc/neural/ops.pyx":1206 * def cpu_clip_gradient(weight_t[::1] gradient, weight_t threshold): * grad_norm = Vec.norm(&gradient[0], gradient.shape[0]) * if grad_norm >= threshold: # <<<<<<<<<<<<<< * Vec.mul_i(&gradient[0], threshold / grad_norm, gradient.shape[0]) * */ } /* "thinc/neural/ops.pyx":1204 * * * def cpu_clip_gradient(weight_t[::1] gradient, weight_t threshold): # <<<<<<<<<<<<<< * grad_norm = Vec.norm(&gradient[0], gradient.shape[0]) * if grad_norm >= threshold: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("thinc.neural.ops.cpu_clip_gradient", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_gradient, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1210 * * * def add_gradient_noise(float[::1] gradient, weight_t noise_level, # <<<<<<<<<<<<<< * weight_t timestep): * cdef weight_t variance = noise_level / ((1 + timestep) ** 0.55) */ /* Python wrapper */ static PyObject *__pyx_pw_5thinc_6neural_3ops_3add_gradient_noise(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5thinc_6neural_3ops_3add_gradient_noise = {"add_gradient_noise", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5thinc_6neural_3ops_3add_gradient_noise, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_5thinc_6neural_3ops_3add_gradient_noise(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_gradient = { 0, 0, { 0 }, { 0 }, { 0 } }; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_noise_level; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_timestep; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("add_gradient_noise (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_gradient,&__pyx_n_s_noise_level,&__pyx_n_s_timestep,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const 
Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_noise_level)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add_gradient_noise", 1, 3, 3, 1); __PYX_ERR(0, 1210, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_timestep)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add_gradient_noise", 1, 3, 3, 2); __PYX_ERR(0, 1210, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "add_gradient_noise") < 0)) __PYX_ERR(0, 1210, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_gradient = __Pyx_PyObject_to_MemoryviewSlice_dc_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_gradient.memview)) __PYX_ERR(0, 1210, __pyx_L3_error) __pyx_v_noise_level = __pyx_PyFloat_AsFloat(values[1]); if (unlikely((__pyx_v_noise_level == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 1210, __pyx_L3_error) __pyx_v_timestep = __pyx_PyFloat_AsFloat(values[2]); if (unlikely((__pyx_v_timestep == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 1211, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("add_gradient_noise", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1210, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("thinc.neural.ops.add_gradient_noise", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5thinc_6neural_3ops_2add_gradient_noise(__pyx_self, __pyx_v_gradient, __pyx_v_noise_level, __pyx_v_timestep); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5thinc_6neural_3ops_2add_gradient_noise(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_gradient, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_noise_level, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_timestep) { __pyx_t_5thinc_8typedefs_weight_t __pyx_v_variance; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; size_t __pyx_t_7; __Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__118) __Pyx_RefNannySetupContext("add_gradient_noise", 0); __Pyx_TraceCall("add_gradient_noise", __pyx_f[0], 1210, 0, __PYX_ERR(0, 1210, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1212 * def add_gradient_noise(float[::1] gradient, weight_t noise_level, * weight_t timestep): * cdef weight_t variance = noise_level / ((1 + 
timestep) ** 0.55) # <<<<<<<<<<<<<< * if variance >= 0.000001: * gradient += numpy.asarray( */ __pyx_v_variance = (__pyx_v_noise_level / pow(((double)(1.0 + __pyx_v_timestep)), 0.55)); /* "thinc/neural/ops.pyx":1213 * weight_t timestep): * cdef weight_t variance = noise_level / ((1 + timestep) ** 0.55) * if variance >= 0.000001: # <<<<<<<<<<<<<< * gradient += numpy.asarray( * numpy.random.normal(scale=variance, loc=0., size=len(gradient)), */ __pyx_t_1 = ((__pyx_v_variance >= 0.000001) != 0); if (__pyx_t_1) { /* "thinc/neural/ops.pyx":1214 * cdef weight_t variance = noise_level / ((1 + timestep) ** 0.55) * if variance >= 0.000001: * gradient += numpy.asarray( # <<<<<<<<<<<<<< * numpy.random.normal(scale=variance, loc=0., size=len(gradient)), * dtype='float32') */ __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_gradient, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1214, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1214, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_asarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1214, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "thinc/neural/ops.pyx":1215 * if variance >= 0.000001: * gradient += numpy.asarray( * numpy.random.normal(scale=variance, loc=0., size=len(gradient)), # <<<<<<<<<<<<<< * dtype='float32') * */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_random); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_normal); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyDict_NewPresized(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyFloat_FromDouble(__pyx_v_variance); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_scale, __pyx_t_6) < 0) __PYX_ERR(0, 1215, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_loc, __pyx_float_0_) < 0) __PYX_ERR(0, 1215, __pyx_L1_error) __pyx_t_7 = __Pyx_MemoryView_Len(__pyx_v_gradient); __pyx_t_6 = __Pyx_PyInt_FromSize_t(__pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_size, __pyx_t_6) < 0) __PYX_ERR(0, 1215, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "thinc/neural/ops.pyx":1214 * cdef weight_t variance = noise_level / ((1 + timestep) ** 0.55) * if variance >= 0.000001: * gradient += numpy.asarray( # <<<<<<<<<<<<<< * numpy.random.normal(scale=variance, loc=0., size=len(gradient)), * dtype='float32') */ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1214, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); 
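/* Note (descriptive comment, not generated by Cython): at this point __pyx_t_6
   holds the noise array returned by the numpy.random.normal(scale=variance,
   loc=0., size=len(gradient)) call above; the one-element tuple built here
   passes it positionally to numpy.asarray(..., dtype='float32'), and the
   resulting float32 array is then added in place to the memoryview of
   `gradient` further below. */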
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6); __pyx_t_6 = 0; /* "thinc/neural/ops.pyx":1216 * gradient += numpy.asarray( * numpy.random.normal(scale=variance, loc=0., size=len(gradient)), * dtype='float32') # <<<<<<<<<<<<<< * * */ __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1216, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_n_s_float32) < 0) __PYX_ERR(0, 1216, __pyx_L1_error) /* "thinc/neural/ops.pyx":1214 * cdef weight_t variance = noise_level / ((1 + timestep) ** 0.55) * if variance >= 0.000001: * gradient += numpy.asarray( # <<<<<<<<<<<<<< * numpy.random.normal(scale=variance, loc=0., size=len(gradient)), * dtype='float32') */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1214, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_InPlaceAdd(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1214, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_dc_float(__pyx_t_6, PyBUF_WRITABLE); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 1214, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_XDEC_MEMVIEW(&__pyx_v_gradient, 1); __pyx_v_gradient = __pyx_t_8; __pyx_t_8.memview = NULL; __pyx_t_8.data = NULL; /* "thinc/neural/ops.pyx":1213 * weight_t timestep): * cdef weight_t variance = noise_level / ((1 + timestep) ** 0.55) * if variance >= 0.000001: # <<<<<<<<<<<<<< * gradient += numpy.asarray( * numpy.random.normal(scale=variance, loc=0., size=len(gradient)), */ } /* "thinc/neural/ops.pyx":1210 * * * def add_gradient_noise(float[::1] gradient, weight_t noise_level, # <<<<<<<<<<<<<< * weight_t timestep): * cdef weight_t variance = noise_level / ((1 + timestep) ** 0.55) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 1); __Pyx_AddTraceback("thinc.neural.ops.add_gradient_noise", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_gradient, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1220 * * * cdef cpu_floats_ptr2array(float* ptr, shape): # <<<<<<<<<<<<<< * cdef ndarray py_out = numpy.zeros(shape, dtype='float32') * cdef int N = numpy.prod(shape) */ static PyObject *__pyx_f_5thinc_6neural_3ops_cpu_floats_ptr2array(float *__pyx_v_ptr, PyObject *__pyx_v_shape) { PyArrayObject *__pyx_v_py_out = 0; int __pyx_v_N; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cpu_floats_ptr2array", 0); __Pyx_TraceCall("cpu_floats_ptr2array", __pyx_f[0], 1220, 0, __PYX_ERR(0, 1220, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1221 * * cdef cpu_floats_ptr2array(float* ptr, shape): * cdef ndarray py_out = numpy.zeros(shape, dtype='float32') # <<<<<<<<<<<<<< * cdef int N = numpy.prod(shape) * memcpy(py_out.data, 
ptr, N * sizeof(ptr[0])) */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1221, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1221, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1221, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_shape); __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1221, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_n_s_float32) < 0) __PYX_ERR(0, 1221, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1221, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 1221, __pyx_L1_error) __pyx_v_py_out = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":1222 * cdef cpu_floats_ptr2array(float* ptr, shape): * cdef ndarray py_out = numpy.zeros(shape, dtype='float32') * cdef int N = numpy.prod(shape) # <<<<<<<<<<<<<< * memcpy(py_out.data, ptr, N * sizeof(ptr[0])) * return py_out */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_prod); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_4 = (__pyx_t_3) ? 
__Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_3, __pyx_v_shape) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_shape); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_4); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1222, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_N = __pyx_t_5; /* "thinc/neural/ops.pyx":1223 * cdef ndarray py_out = numpy.zeros(shape, dtype='float32') * cdef int N = numpy.prod(shape) * memcpy(py_out.data, ptr, N * sizeof(ptr[0])) # <<<<<<<<<<<<<< * return py_out * */ (void)(memcpy(__pyx_v_py_out->data, __pyx_v_ptr, (__pyx_v_N * (sizeof((__pyx_v_ptr[0])))))); /* "thinc/neural/ops.pyx":1224 * cdef int N = numpy.prod(shape) * memcpy(py_out.data, ptr, N * sizeof(ptr[0])) * return py_out # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_py_out)); __pyx_r = ((PyObject *)__pyx_v_py_out); goto __pyx_L0; /* "thinc/neural/ops.pyx":1220 * * * cdef cpu_floats_ptr2array(float* ptr, shape): # <<<<<<<<<<<<<< * cdef ndarray py_out = numpy.zeros(shape, dtype='float32') * cdef int N = numpy.prod(shape) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.cpu_floats_ptr2array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_py_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1227 * * * cdef cpu_ints_ptr2array(int* ptr, shape): # <<<<<<<<<<<<<< * cdef ndarray py_out = numpy.zeros(shape, dtype='int32') * cdef int N = numpy.prod(shape) */ static PyObject *__pyx_f_5thinc_6neural_3ops_cpu_ints_ptr2array(int *__pyx_v_ptr, PyObject *__pyx_v_shape) { PyArrayObject *__pyx_v_py_out = 0; int __pyx_v_N; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cpu_ints_ptr2array", 0); __Pyx_TraceCall("cpu_ints_ptr2array", __pyx_f[0], 1227, 0, __PYX_ERR(0, 1227, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1228 * * cdef cpu_ints_ptr2array(int* ptr, shape): * cdef ndarray py_out = numpy.zeros(shape, dtype='int32') # <<<<<<<<<<<<<< * cdef int N = numpy.prod(shape) * memcpy(py_out.data, ptr, N * sizeof(ptr[0])) */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_shape); __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_n_s_int32) < 0) __PYX_ERR(0, 1228, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 1228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 1228, __pyx_L1_error) __pyx_v_py_out = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0; /* "thinc/neural/ops.pyx":1229 * cdef cpu_ints_ptr2array(int* ptr, shape): * cdef ndarray py_out = numpy.zeros(shape, dtype='int32') * cdef int N = numpy.prod(shape) # <<<<<<<<<<<<<< * memcpy(py_out.data, ptr, N * sizeof(ptr[0])) * return py_out */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1229, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_prod); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1229, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_4 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_3, __pyx_v_shape) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_shape); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1229, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_4); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 1229, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_N = __pyx_t_5; /* "thinc/neural/ops.pyx":1230 * cdef ndarray py_out = numpy.zeros(shape, dtype='int32') * cdef int N = numpy.prod(shape) * memcpy(py_out.data, ptr, N * sizeof(ptr[0])) # <<<<<<<<<<<<<< * return py_out * */ (void)(memcpy(__pyx_v_py_out->data, __pyx_v_ptr, (__pyx_v_N * (sizeof((__pyx_v_ptr[0])))))); /* "thinc/neural/ops.pyx":1231 * cdef int N = numpy.prod(shape) * memcpy(py_out.data, ptr, N * sizeof(ptr[0])) * return py_out # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_py_out)); __pyx_r = ((PyObject *)__pyx_v_py_out); goto __pyx_L0; /* "thinc/neural/ops.pyx":1227 * * * cdef cpu_ints_ptr2array(int* ptr, shape): # <<<<<<<<<<<<<< * cdef ndarray py_out = numpy.zeros(shape, dtype='int32') * cdef int N = numpy.prod(shape) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("thinc.neural.ops.cpu_ints_ptr2array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_py_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "thinc/neural/ops.pyx":1234 * * * cdef void cpu_mean_pool(float* means__bo, # <<<<<<<<<<<<<< * const float* X__to, const int* lengths__b, * int B, int T, int O) nogil: */ static void __pyx_f_5thinc_6neural_3ops_cpu_mean_pool(float *__pyx_v_means__bo, float const *__pyx_v_X__to, int const *__pyx_v_lengths__b, int __pyx_v_B, CYTHON_UNUSED int __pyx_v_T, int __pyx_v_O) { float __pyx_v_scale; int __pyx_v_length; CYTHON_UNUSED int __pyx_v__; __Pyx_TraceDeclarations int const *__pyx_t_1; int const *__pyx_t_2; int const *__pyx_t_3; int __pyx_t_4; int __pyx_t_5; int 
__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_mean_pool", __pyx_f[0], 1234, 1, __PYX_ERR(0, 1234, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1238 * int B, int T, int O) nogil: * '''Compute means of a batch of concatenated sequences, using the lengths.''' * cdef float scale = 0. # <<<<<<<<<<<<<< * for length in lengths__b[:B]: * scale = 1. / length */ __pyx_v_scale = 0.; /* "thinc/neural/ops.pyx":1239 * '''Compute means of a batch of concatenated sequences, using the lengths.''' * cdef float scale = 0. * for length in lengths__b[:B]: # <<<<<<<<<<<<<< * scale = 1. / length * for _ in range(length): */ __pyx_t_2 = (__pyx_v_lengths__b + __pyx_v_B); for (__pyx_t_3 = __pyx_v_lengths__b; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_length = (__pyx_t_1[0]); /* "thinc/neural/ops.pyx":1240 * cdef float scale = 0. * for length in lengths__b[:B]: * scale = 1. / length # <<<<<<<<<<<<<< * for _ in range(length): * VecVec.add_i(means__bo, */ __pyx_v_scale = (1. / __pyx_v_length); /* "thinc/neural/ops.pyx":1241 * for length in lengths__b[:B]: * scale = 1. / length * for _ in range(length): # <<<<<<<<<<<<<< * VecVec.add_i(means__bo, * X__to, scale, O) */ __pyx_t_4 = __pyx_v_length; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v__ = __pyx_t_6; /* "thinc/neural/ops.pyx":1242 * scale = 1. / length * for _ in range(length): * VecVec.add_i(means__bo, # <<<<<<<<<<<<<< * X__to, scale, O) * X__to += O */ __pyx_f_5thinc_6linalg_6VecVec_add_i(__pyx_v_means__bo, __pyx_v_X__to, __pyx_v_scale, __pyx_v_O); /* "thinc/neural/ops.pyx":1244 * VecVec.add_i(means__bo, * X__to, scale, O) * X__to += O # <<<<<<<<<<<<<< * means__bo += O * */ __pyx_v_X__to = (__pyx_v_X__to + __pyx_v_O); } /* "thinc/neural/ops.pyx":1245 * X__to, scale, O) * X__to += O * means__bo += O # <<<<<<<<<<<<<< * * */ __pyx_v_means__bo = (__pyx_v_means__bo + __pyx_v_O); } /* "thinc/neural/ops.pyx":1234 * * * cdef void cpu_mean_pool(float* means__bo, # <<<<<<<<<<<<<< * const float* X__to, const int* lengths__b, * int B, int T, int O) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_mean_pool", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":1248 * * * cdef void cpu_backprop_mean_pool(float* dX__to, # <<<<<<<<<<<<<< * const float* d_means__bo, const int* lengths__b, * int B, int T, int O) nogil: */ static void __pyx_f_5thinc_6neural_3ops_cpu_backprop_mean_pool(float *__pyx_v_dX__to, float const *__pyx_v_d_means__bo, int const *__pyx_v_lengths__b, int __pyx_v_B, CYTHON_UNUSED int __pyx_v_T, int __pyx_v_O) { float __pyx_v_scale; int __pyx_v_length; CYTHON_UNUSED int __pyx_v__; __Pyx_TraceDeclarations int const *__pyx_t_1; int const *__pyx_t_2; int const *__pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_backprop_mean_pool", __pyx_f[0], 1248, 1, __PYX_ERR(0, 1248, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1251 * const float* d_means__bo, const int* lengths__b, * int B, int T, int O) nogil: * cdef float scale = 0. # <<<<<<<<<<<<<< * for length in lengths__b[:B]: * scale = 1./ length */ __pyx_v_scale = 0.; /* "thinc/neural/ops.pyx":1252 * int B, int T, int O) nogil: * cdef float scale = 0. 
* for length in lengths__b[:B]: # <<<<<<<<<<<<<< * scale = 1./ length * for _ in range(length): */ __pyx_t_2 = (__pyx_v_lengths__b + __pyx_v_B); for (__pyx_t_3 = __pyx_v_lengths__b; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_length = (__pyx_t_1[0]); /* "thinc/neural/ops.pyx":1253 * cdef float scale = 0. * for length in lengths__b[:B]: * scale = 1./ length # <<<<<<<<<<<<<< * for _ in range(length): * VecVec.add_i(dX__to, */ __pyx_v_scale = (1. / __pyx_v_length); /* "thinc/neural/ops.pyx":1254 * for length in lengths__b[:B]: * scale = 1./ length * for _ in range(length): # <<<<<<<<<<<<<< * VecVec.add_i(dX__to, * d_means__bo, scale, O) */ __pyx_t_4 = __pyx_v_length; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v__ = __pyx_t_6; /* "thinc/neural/ops.pyx":1255 * scale = 1./ length * for _ in range(length): * VecVec.add_i(dX__to, # <<<<<<<<<<<<<< * d_means__bo, scale, O) * dX__to += O */ __pyx_f_5thinc_6linalg_6VecVec_add_i(__pyx_v_dX__to, __pyx_v_d_means__bo, __pyx_v_scale, __pyx_v_O); /* "thinc/neural/ops.pyx":1257 * VecVec.add_i(dX__to, * d_means__bo, scale, O) * dX__to += O # <<<<<<<<<<<<<< * d_means__bo += O * */ __pyx_v_dX__to = (__pyx_v_dX__to + __pyx_v_O); } /* "thinc/neural/ops.pyx":1258 * d_means__bo, scale, O) * dX__to += O * d_means__bo += O # <<<<<<<<<<<<<< * * */ __pyx_v_d_means__bo = (__pyx_v_d_means__bo + __pyx_v_O); } /* "thinc/neural/ops.pyx":1248 * * * cdef void cpu_backprop_mean_pool(float* dX__to, # <<<<<<<<<<<<<< * const float* d_means__bo, const int* lengths__b, * int B, int T, int O) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_backprop_mean_pool", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":1261 * * * cdef void cpu_sum_pool(float* sums__bo, # <<<<<<<<<<<<<< * const float* X__to, const int* lengths__b, * int B, int T, int O) nogil: */ static void __pyx_f_5thinc_6neural_3ops_cpu_sum_pool(float *__pyx_v_sums__bo, float const *__pyx_v_X__to, int const *__pyx_v_lengths__b, int __pyx_v_B, CYTHON_UNUSED int __pyx_v_T, int __pyx_v_O) { int __pyx_v_length; CYTHON_UNUSED int __pyx_v__; __Pyx_TraceDeclarations int const *__pyx_t_1; int const *__pyx_t_2; int const *__pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_sum_pool", __pyx_f[0], 1261, 1, __PYX_ERR(0, 1261, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1265 * int B, int T, int O) nogil: * '''Compute sums of a batch of concatenated sequences, using the lengths.''' * for length in lengths__b[:B]: # <<<<<<<<<<<<<< * for _ in range(length): * VecVec.add_i(sums__bo, */ __pyx_t_2 = (__pyx_v_lengths__b + __pyx_v_B); for (__pyx_t_3 = __pyx_v_lengths__b; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_length = (__pyx_t_1[0]); /* "thinc/neural/ops.pyx":1266 * '''Compute sums of a batch of concatenated sequences, using the lengths.''' * for length in lengths__b[:B]: * for _ in range(length): # <<<<<<<<<<<<<< * VecVec.add_i(sums__bo, * X__to, 1.0, O) */ __pyx_t_4 = __pyx_v_length; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v__ = __pyx_t_6; /* "thinc/neural/ops.pyx":1267 * for length in lengths__b[:B]: * for _ in range(length): * VecVec.add_i(sums__bo, # <<<<<<<<<<<<<< * X__to, 1.0, O) * X__to += O */ __pyx_f_5thinc_6linalg_6VecVec_add_i(__pyx_v_sums__bo, 
__pyx_v_X__to, 1.0, __pyx_v_O); /* "thinc/neural/ops.pyx":1269 * VecVec.add_i(sums__bo, * X__to, 1.0, O) * X__to += O # <<<<<<<<<<<<<< * sums__bo += O * */ __pyx_v_X__to = (__pyx_v_X__to + __pyx_v_O); } /* "thinc/neural/ops.pyx":1270 * X__to, 1.0, O) * X__to += O * sums__bo += O # <<<<<<<<<<<<<< * * */ __pyx_v_sums__bo = (__pyx_v_sums__bo + __pyx_v_O); } /* "thinc/neural/ops.pyx":1261 * * * cdef void cpu_sum_pool(float* sums__bo, # <<<<<<<<<<<<<< * const float* X__to, const int* lengths__b, * int B, int T, int O) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_sum_pool", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":1273 * * * cdef void cpu_backprop_sum_pool(float* dX__to, # <<<<<<<<<<<<<< * const float* d_sums__bo, const int* lengths__b, * int B, int T, int O) nogil: */ static void __pyx_f_5thinc_6neural_3ops_cpu_backprop_sum_pool(float *__pyx_v_dX__to, float const *__pyx_v_d_sums__bo, int const *__pyx_v_lengths__b, int __pyx_v_B, CYTHON_UNUSED int __pyx_v_T, int __pyx_v_O) { int __pyx_v_length; CYTHON_UNUSED int __pyx_v__; __Pyx_TraceDeclarations int const *__pyx_t_1; int const *__pyx_t_2; int const *__pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_backprop_sum_pool", __pyx_f[0], 1273, 1, __PYX_ERR(0, 1273, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1276 * const float* d_sums__bo, const int* lengths__b, * int B, int T, int O) nogil: * for length in lengths__b[:B]: # <<<<<<<<<<<<<< * for _ in range(length): * VecVec.add_i(dX__to, */ __pyx_t_2 = (__pyx_v_lengths__b + __pyx_v_B); for (__pyx_t_3 = __pyx_v_lengths__b; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_length = (__pyx_t_1[0]); /* "thinc/neural/ops.pyx":1277 * int B, int T, int O) nogil: * for length in lengths__b[:B]: * for _ in range(length): # <<<<<<<<<<<<<< * VecVec.add_i(dX__to, * d_sums__bo, 1.0, O) */ __pyx_t_4 = __pyx_v_length; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v__ = __pyx_t_6; /* "thinc/neural/ops.pyx":1278 * for length in lengths__b[:B]: * for _ in range(length): * VecVec.add_i(dX__to, # <<<<<<<<<<<<<< * d_sums__bo, 1.0, O) * dX__to += O */ __pyx_f_5thinc_6linalg_6VecVec_add_i(__pyx_v_dX__to, __pyx_v_d_sums__bo, 1.0, __pyx_v_O); /* "thinc/neural/ops.pyx":1280 * VecVec.add_i(dX__to, * d_sums__bo, 1.0, O) * dX__to += O # <<<<<<<<<<<<<< * d_sums__bo += O * */ __pyx_v_dX__to = (__pyx_v_dX__to + __pyx_v_O); } /* "thinc/neural/ops.pyx":1281 * d_sums__bo, 1.0, O) * dX__to += O * d_sums__bo += O # <<<<<<<<<<<<<< * * */ __pyx_v_d_sums__bo = (__pyx_v_d_sums__bo + __pyx_v_O); } /* "thinc/neural/ops.pyx":1273 * * * cdef void cpu_backprop_sum_pool(float* dX__to, # <<<<<<<<<<<<<< * const float* d_sums__bo, const int* lengths__b, * int B, int T, int O) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_backprop_sum_pool", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":1284 * * * cdef void cpu_max_pool(float* maxes__bo, int* which__bo, # <<<<<<<<<<<<<< * const float* X__to, const int* lengths__b, * int B, int T, int O) nogil: */ static void __pyx_f_5thinc_6neural_3ops_cpu_max_pool(float *__pyx_v_maxes__bo, int *__pyx_v_which__bo, float const *__pyx_v_X__to, int const *__pyx_v_lengths__b, 
int __pyx_v_B, CYTHON_UNUSED int __pyx_v_T, int __pyx_v_O) { CYTHON_UNUSED float __pyx_v_scale; int __pyx_v_length; long __pyx_v_i; int __pyx_v_j; __Pyx_TraceDeclarations int const *__pyx_t_1; int const *__pyx_t_2; int const *__pyx_t_3; int __pyx_t_4; int __pyx_t_5; long __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_max_pool", __pyx_f[0], 1284, 1, __PYX_ERR(0, 1284, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1288 * int B, int T, int O) nogil: * '''Compute maxes of a batch of concatenated sequences, using the lengths.''' * cdef float scale = 0. # <<<<<<<<<<<<<< * for length in lengths__b[:B]: * memcpy(maxes__bo, X__to, O * sizeof(maxes__bo[0])) */ __pyx_v_scale = 0.; /* "thinc/neural/ops.pyx":1289 * '''Compute maxes of a batch of concatenated sequences, using the lengths.''' * cdef float scale = 0. * for length in lengths__b[:B]: # <<<<<<<<<<<<<< * memcpy(maxes__bo, X__to, O * sizeof(maxes__bo[0])) * memset(which__bo, 0, O * sizeof(which__bo[0])) */ __pyx_t_2 = (__pyx_v_lengths__b + __pyx_v_B); for (__pyx_t_3 = __pyx_v_lengths__b; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_length = (__pyx_t_1[0]); /* "thinc/neural/ops.pyx":1290 * cdef float scale = 0. * for length in lengths__b[:B]: * memcpy(maxes__bo, X__to, O * sizeof(maxes__bo[0])) # <<<<<<<<<<<<<< * memset(which__bo, 0, O * sizeof(which__bo[0])) * X__to += O */ (void)(memcpy(__pyx_v_maxes__bo, __pyx_v_X__to, (__pyx_v_O * (sizeof((__pyx_v_maxes__bo[0])))))); /* "thinc/neural/ops.pyx":1291 * for length in lengths__b[:B]: * memcpy(maxes__bo, X__to, O * sizeof(maxes__bo[0])) * memset(which__bo, 0, O * sizeof(which__bo[0])) # <<<<<<<<<<<<<< * X__to += O * for i in range(1, length): */ (void)(memset(__pyx_v_which__bo, 0, (__pyx_v_O * (sizeof((__pyx_v_which__bo[0])))))); /* "thinc/neural/ops.pyx":1292 * memcpy(maxes__bo, X__to, O * sizeof(maxes__bo[0])) * memset(which__bo, 0, O * sizeof(which__bo[0])) * X__to += O # <<<<<<<<<<<<<< * for i in range(1, length): * for j in range(O): */ __pyx_v_X__to = (__pyx_v_X__to + __pyx_v_O); /* "thinc/neural/ops.pyx":1293 * memset(which__bo, 0, O * sizeof(which__bo[0])) * X__to += O * for i in range(1, length): # <<<<<<<<<<<<<< * for j in range(O): * if X__to[j] > maxes__bo[j]: */ __pyx_t_4 = __pyx_v_length; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 1; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "thinc/neural/ops.pyx":1294 * X__to += O * for i in range(1, length): * for j in range(O): # <<<<<<<<<<<<<< * if X__to[j] > maxes__bo[j]: * maxes__bo[j] = X__to[j] */ __pyx_t_7 = __pyx_v_O; __pyx_t_8 = __pyx_t_7; for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_j = __pyx_t_9; /* "thinc/neural/ops.pyx":1295 * for i in range(1, length): * for j in range(O): * if X__to[j] > maxes__bo[j]: # <<<<<<<<<<<<<< * maxes__bo[j] = X__to[j] * which__bo[j] = i */ __pyx_t_10 = (((__pyx_v_X__to[__pyx_v_j]) > (__pyx_v_maxes__bo[__pyx_v_j])) != 0); if (__pyx_t_10) { /* "thinc/neural/ops.pyx":1296 * for j in range(O): * if X__to[j] > maxes__bo[j]: * maxes__bo[j] = X__to[j] # <<<<<<<<<<<<<< * which__bo[j] = i * X__to += O */ (__pyx_v_maxes__bo[__pyx_v_j]) = (__pyx_v_X__to[__pyx_v_j]); /* "thinc/neural/ops.pyx":1297 * if X__to[j] > maxes__bo[j]: * maxes__bo[j] = X__to[j] * which__bo[j] = i # <<<<<<<<<<<<<< * X__to += O * maxes__bo += O */ (__pyx_v_which__bo[__pyx_v_j]) = __pyx_v_i; /* "thinc/neural/ops.pyx":1295 * for i in range(1, length): * for j in 
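*/
/* Note, at this point in cpu_max_pool: maxes__bo was seeded with the first
   row of the sequence and which__bo was zeroed, so for every later row i any
   feature j that beats the current max updates maxes__bo[j] and records i in
   which__bo[j]. which__bo therefore ends up holding, per feature, the index
   of the row within the sequence that produced the max, which is what the
   backward kernel below uses to route gradients. */
/*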
range(O): * if X__to[j] > maxes__bo[j]: # <<<<<<<<<<<<<< * maxes__bo[j] = X__to[j] * which__bo[j] = i */ } } /* "thinc/neural/ops.pyx":1298 * maxes__bo[j] = X__to[j] * which__bo[j] = i * X__to += O # <<<<<<<<<<<<<< * maxes__bo += O * which__bo += O */ __pyx_v_X__to = (__pyx_v_X__to + __pyx_v_O); } /* "thinc/neural/ops.pyx":1299 * which__bo[j] = i * X__to += O * maxes__bo += O # <<<<<<<<<<<<<< * which__bo += O * */ __pyx_v_maxes__bo = (__pyx_v_maxes__bo + __pyx_v_O); /* "thinc/neural/ops.pyx":1300 * X__to += O * maxes__bo += O * which__bo += O # <<<<<<<<<<<<<< * * */ __pyx_v_which__bo = (__pyx_v_which__bo + __pyx_v_O); } /* "thinc/neural/ops.pyx":1284 * * * cdef void cpu_max_pool(float* maxes__bo, int* which__bo, # <<<<<<<<<<<<<< * const float* X__to, const int* lengths__b, * int B, int T, int O) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_max_pool", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":1303 * * * cdef void cpu_backprop_max_pool(float* dX__to, # <<<<<<<<<<<<<< * const float* d_maxes__bo, const int* which__bo, const int* lengths__b, * int B, int T, int O) nogil: */ static void __pyx_f_5thinc_6neural_3ops_cpu_backprop_max_pool(float *__pyx_v_dX__to, float const *__pyx_v_d_maxes__bo, int const *__pyx_v_which__bo, int const *__pyx_v_lengths__b, int __pyx_v_B, CYTHON_UNUSED int __pyx_v_T, int __pyx_v_O) { int __pyx_v_length; int __pyx_v_i; int __pyx_v_j; __Pyx_TraceDeclarations int const *__pyx_t_1; int const *__pyx_t_2; int const *__pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_backprop_max_pool", __pyx_f[0], 1303, 1, __PYX_ERR(0, 1303, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1307 * int B, int T, int O) nogil: * cdef int length, i, j * for length in lengths__b[:B]: # <<<<<<<<<<<<<< * for i in range(length): * for j in range(O): */ __pyx_t_2 = (__pyx_v_lengths__b + __pyx_v_B); for (__pyx_t_3 = __pyx_v_lengths__b; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_length = (__pyx_t_1[0]); /* "thinc/neural/ops.pyx":1308 * cdef int length, i, j * for length in lengths__b[:B]: * for i in range(length): # <<<<<<<<<<<<<< * for j in range(O): * if which__bo[j] == i: */ __pyx_t_4 = __pyx_v_length; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "thinc/neural/ops.pyx":1309 * for length in lengths__b[:B]: * for i in range(length): * for j in range(O): # <<<<<<<<<<<<<< * if which__bo[j] == i: * dX__to[j] += d_maxes__bo[j] */ __pyx_t_7 = __pyx_v_O; __pyx_t_8 = __pyx_t_7; for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_j = __pyx_t_9; /* "thinc/neural/ops.pyx":1310 * for i in range(length): * for j in range(O): * if which__bo[j] == i: # <<<<<<<<<<<<<< * dX__to[j] += d_maxes__bo[j] * dX__to += O */ __pyx_t_10 = (((__pyx_v_which__bo[__pyx_v_j]) == __pyx_v_i) != 0); if (__pyx_t_10) { /* "thinc/neural/ops.pyx":1311 * for j in range(O): * if which__bo[j] == i: * dX__to[j] += d_maxes__bo[j] # <<<<<<<<<<<<<< * dX__to += O * d_maxes__bo += O */ __pyx_t_11 = __pyx_v_j; (__pyx_v_dX__to[__pyx_t_11]) = ((__pyx_v_dX__to[__pyx_t_11]) + (__pyx_v_d_maxes__bo[__pyx_v_j])); /* "thinc/neural/ops.pyx":1310 * for i in range(length): * for j in range(O): * if which__bo[j] == i: # <<<<<<<<<<<<<< * 
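*/
/* Note, at this point in cpu_backprop_max_pool: the gradient for each feature
   flows only to the row that won the max in the forward pass, i.e. dX__to[j]
   receives d_maxes__bo[j] only when which__bo[j] == i; every other row of the
   sequence keeps a zero gradient (dX__to is presumably zero-initialised by
   the caller, since it is only ever added to here). */
/*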
dX__to[j] += d_maxes__bo[j] * dX__to += O */ } } /* "thinc/neural/ops.pyx":1312 * if which__bo[j] == i: * dX__to[j] += d_maxes__bo[j] * dX__to += O # <<<<<<<<<<<<<< * d_maxes__bo += O * which__bo += O */ __pyx_v_dX__to = (__pyx_v_dX__to + __pyx_v_O); } /* "thinc/neural/ops.pyx":1313 * dX__to[j] += d_maxes__bo[j] * dX__to += O * d_maxes__bo += O # <<<<<<<<<<<<<< * which__bo += O * */ __pyx_v_d_maxes__bo = (__pyx_v_d_maxes__bo + __pyx_v_O); /* "thinc/neural/ops.pyx":1314 * dX__to += O * d_maxes__bo += O * which__bo += O # <<<<<<<<<<<<<< * * */ __pyx_v_which__bo = (__pyx_v_which__bo + __pyx_v_O); } /* "thinc/neural/ops.pyx":1303 * * * cdef void cpu_backprop_max_pool(float* dX__to, # <<<<<<<<<<<<<< * const float* d_maxes__bo, const int* which__bo, const int* lengths__b, * int B, int T, int O) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_backprop_max_pool", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":1317 * * * cdef inline float sigmoid(float X) nogil: # <<<<<<<<<<<<<< * return 1./(1. + expf(-X)) * */ static CYTHON_INLINE float __pyx_f_5thinc_6neural_3ops_sigmoid(float __pyx_v_X) { float __pyx_r; __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("sigmoid", __pyx_f[0], 1317, 1, __PYX_ERR(0, 1317, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1318 * * cdef inline float sigmoid(float X) nogil: * return 1./(1. + expf(-X)) # <<<<<<<<<<<<<< * * */ __pyx_r = (1. / (1. + expf((-__pyx_v_X)))); goto __pyx_L0; /* "thinc/neural/ops.pyx":1317 * * * cdef inline float sigmoid(float X) nogil: # <<<<<<<<<<<<<< * return 1./(1. + expf(-X)) * */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.sigmoid", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "thinc/neural/ops.pyx":1321 * * * cdef inline float dsigmoid(float y) nogil: # <<<<<<<<<<<<<< * return y*(1-y) * */ static CYTHON_INLINE float __pyx_f_5thinc_6neural_3ops_dsigmoid(float __pyx_v_y) { float __pyx_r; __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("dsigmoid", __pyx_f[0], 1321, 1, __PYX_ERR(0, 1321, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1322 * * cdef inline float dsigmoid(float y) nogil: * return y*(1-y) # <<<<<<<<<<<<<< * * */ __pyx_r = (__pyx_v_y * (1.0 - __pyx_v_y)); goto __pyx_L0; /* "thinc/neural/ops.pyx":1321 * * * cdef inline float dsigmoid(float y) nogil: # <<<<<<<<<<<<<< * return y*(1-y) * */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.dsigmoid", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "thinc/neural/ops.pyx":1325 * * * cdef inline float dtanh(float y) nogil: # <<<<<<<<<<<<<< * return 1-y**2 * */ static CYTHON_INLINE float __pyx_f_5thinc_6neural_3ops_dtanh(float __pyx_v_y) { float __pyx_r; __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("dtanh", __pyx_f[0], 1325, 1, __PYX_ERR(0, 1325, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1326 * * cdef inline float dtanh(float y) nogil: * return 1-y**2 # <<<<<<<<<<<<<< * * */ __pyx_r = (1.0 - powf(__pyx_v_y, 2.0)); goto __pyx_L0; /* "thinc/neural/ops.pyx":1325 * * * cdef inline float dtanh(float y) nogil: # 
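*/
/* Note on sigmoid/dsigmoid/dtanh above: the derivative helpers are written in
   terms of the activation output y rather than the input x, using
   d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)) = y * (1 - y) and
   d/dx tanh(x) = 1 - tanh(x)^2 = 1 - y^2. This is why the LSTM backward
   kernel below can work from the saved gate activations alone. */
/*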
<<<<<<<<<<<<<< * return 1-y**2 * */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.dtanh", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "thinc/neural/ops.pyx":1329 * * * cdef void cpu_lstm_gates_fwd(float* output, float* cells, float* gates, # <<<<<<<<<<<<<< * const float* prev, int B, int N) nogil: * cdef float hf, hi, ho, hc */ static void __pyx_f_5thinc_6neural_3ops_cpu_lstm_gates_fwd(float *__pyx_v_output, float *__pyx_v_cells, float *__pyx_v_gates, float const *__pyx_v_prev, int __pyx_v_B, int __pyx_v_N) { float __pyx_v_hf; float __pyx_v_hi; float __pyx_v_ho; float __pyx_v_hc; int __pyx_v_i; CYTHON_UNUSED int __pyx_v_b; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_lstm_gates_fwd", __pyx_f[0], 1329, 1, __PYX_ERR(0, 1329, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1333 * cdef float hf, hi, ho, hc * cdef int i, b * for b in range(B): # <<<<<<<<<<<<<< * for i in range(N): * hf = sigmoid(gates[i*4+0]) */ __pyx_t_1 = __pyx_v_B; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_b = __pyx_t_3; /* "thinc/neural/ops.pyx":1334 * cdef int i, b * for b in range(B): * for i in range(N): # <<<<<<<<<<<<<< * hf = sigmoid(gates[i*4+0]) * hi = sigmoid(gates[i*4+1]) */ __pyx_t_4 = __pyx_v_N; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "thinc/neural/ops.pyx":1335 * for b in range(B): * for i in range(N): * hf = sigmoid(gates[i*4+0]) # <<<<<<<<<<<<<< * hi = sigmoid(gates[i*4+1]) * ho = sigmoid(gates[i*4+2]) */ __pyx_v_hf = __pyx_f_5thinc_6neural_3ops_sigmoid((__pyx_v_gates[((__pyx_v_i * 4) + 0)])); /* "thinc/neural/ops.pyx":1336 * for i in range(N): * hf = sigmoid(gates[i*4+0]) * hi = sigmoid(gates[i*4+1]) # <<<<<<<<<<<<<< * ho = sigmoid(gates[i*4+2]) * hc = tanhf(gates[i*4+3]) */ __pyx_v_hi = __pyx_f_5thinc_6neural_3ops_sigmoid((__pyx_v_gates[((__pyx_v_i * 4) + 1)])); /* "thinc/neural/ops.pyx":1337 * hf = sigmoid(gates[i*4+0]) * hi = sigmoid(gates[i*4+1]) * ho = sigmoid(gates[i*4+2]) # <<<<<<<<<<<<<< * hc = tanhf(gates[i*4+3]) * cells[i] = hf * prev[i] + hi * hc */ __pyx_v_ho = __pyx_f_5thinc_6neural_3ops_sigmoid((__pyx_v_gates[((__pyx_v_i * 4) + 2)])); /* "thinc/neural/ops.pyx":1338 * hi = sigmoid(gates[i*4+1]) * ho = sigmoid(gates[i*4+2]) * hc = tanhf(gates[i*4+3]) # <<<<<<<<<<<<<< * cells[i] = hf * prev[i] + hi * hc * output[i] = tanhf(cells[i]) * ho */ __pyx_v_hc = tanhf((__pyx_v_gates[((__pyx_v_i * 4) + 3)])); /* "thinc/neural/ops.pyx":1339 * ho = sigmoid(gates[i*4+2]) * hc = tanhf(gates[i*4+3]) * cells[i] = hf * prev[i] + hi * hc # <<<<<<<<<<<<<< * output[i] = tanhf(cells[i]) * ho * gates[i*4+0] = hf */ (__pyx_v_cells[__pyx_v_i]) = ((__pyx_v_hf * (__pyx_v_prev[__pyx_v_i])) + (__pyx_v_hi * __pyx_v_hc)); /* "thinc/neural/ops.pyx":1340 * hc = tanhf(gates[i*4+3]) * cells[i] = hf * prev[i] + hi * hc * output[i] = tanhf(cells[i]) * ho # <<<<<<<<<<<<<< * gates[i*4+0] = hf * gates[i*4+1] = hi */ (__pyx_v_output[__pyx_v_i]) = (tanhf((__pyx_v_cells[__pyx_v_i])) * __pyx_v_ho); /* "thinc/neural/ops.pyx":1341 * cells[i] = hf * prev[i] + hi * hc * output[i] = tanhf(cells[i]) * ho * gates[i*4+0] = hf # <<<<<<<<<<<<<< * gates[i*4+1] = hi * gates[i*4+2] = ho */ (__pyx_v_gates[((__pyx_v_i * 4) + 0)]) = __pyx_v_hf; /* 
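*/
/* Note, at this point in cpu_lstm_gates_fwd: the raw gate pre-activations in
   gates are overwritten in place with the activated values hf, hi, ho, hc.
   Per element the kernel computes, in the usual LSTM notation,
       cells[i]  = hf * prev[i] + hi * hc        (new cell state)
       output[i] = ho * tanh(cells[i])           (hidden state)
   and saving the activated gates here is what allows cpu_lstm_gates_bwd below
   to apply the chain rule without recomputing the forward pass. */
/*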
"thinc/neural/ops.pyx":1342 * output[i] = tanhf(cells[i]) * ho * gates[i*4+0] = hf * gates[i*4+1] = hi # <<<<<<<<<<<<<< * gates[i*4+2] = ho * gates[i*4+3] = hc */ (__pyx_v_gates[((__pyx_v_i * 4) + 1)]) = __pyx_v_hi; /* "thinc/neural/ops.pyx":1343 * gates[i*4+0] = hf * gates[i*4+1] = hi * gates[i*4+2] = ho # <<<<<<<<<<<<<< * gates[i*4+3] = hc * output += N */ (__pyx_v_gates[((__pyx_v_i * 4) + 2)]) = __pyx_v_ho; /* "thinc/neural/ops.pyx":1344 * gates[i*4+1] = hi * gates[i*4+2] = ho * gates[i*4+3] = hc # <<<<<<<<<<<<<< * output += N * gates += N*4 */ (__pyx_v_gates[((__pyx_v_i * 4) + 3)]) = __pyx_v_hc; } /* "thinc/neural/ops.pyx":1345 * gates[i*4+2] = ho * gates[i*4+3] = hc * output += N # <<<<<<<<<<<<<< * gates += N*4 * prev += N */ __pyx_v_output = (__pyx_v_output + __pyx_v_N); /* "thinc/neural/ops.pyx":1346 * gates[i*4+3] = hc * output += N * gates += N*4 # <<<<<<<<<<<<<< * prev += N * cells += N */ __pyx_v_gates = (__pyx_v_gates + (__pyx_v_N * 4)); /* "thinc/neural/ops.pyx":1347 * output += N * gates += N*4 * prev += N # <<<<<<<<<<<<<< * cells += N * */ __pyx_v_prev = (__pyx_v_prev + __pyx_v_N); /* "thinc/neural/ops.pyx":1348 * gates += N*4 * prev += N * cells += N # <<<<<<<<<<<<<< * * */ __pyx_v_cells = (__pyx_v_cells + __pyx_v_N); } /* "thinc/neural/ops.pyx":1329 * * * cdef void cpu_lstm_gates_fwd(float* output, float* cells, float* gates, # <<<<<<<<<<<<<< * const float* prev, int B, int N) nogil: * cdef float hf, hi, ho, hc */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_lstm_gates_fwd", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "thinc/neural/ops.pyx":1351 * * * cdef void cpu_lstm_gates_bwd(float* d_cells, float* d_prev, float* d_gates, # <<<<<<<<<<<<<< * const float* d_output, const float* gates, const float* cells, * const float* prev, int B, int N) nogil: */ static void __pyx_f_5thinc_6neural_3ops_cpu_lstm_gates_bwd(float *__pyx_v_d_cells, float *__pyx_v_d_prev, float *__pyx_v_d_gates, float const *__pyx_v_d_output, float const *__pyx_v_gates, float const *__pyx_v_cells, float const *__pyx_v_prev, int __pyx_v_B, int __pyx_v_N) { float __pyx_v_hf; float __pyx_v_hi; float __pyx_v_ho; float __pyx_v_hc; CYTHON_UNUSED float __pyx_v_c; float __pyx_v_ct; float __pyx_v_dh; float __pyx_v_dho; float __pyx_v_dc; float __pyx_v_dhf; float __pyx_v_dhi; float __pyx_v_dhc; float __pyx_v_dprev; int __pyx_v_i; CYTHON_UNUSED int __pyx_v_b; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("cpu_lstm_gates_bwd", __pyx_f[0], 1351, 1, __PYX_ERR(0, 1351, __pyx_L1_error)); /* "thinc/neural/ops.pyx":1356 * cdef float hf, hi, ho, hc, c, ct, dh, dho, dc, dhf, dhi, dhc, dprev * cdef int i, b * for b in range(B): # <<<<<<<<<<<<<< * for i in range(N): * hf = gates[i*4+0] */ __pyx_t_1 = __pyx_v_B; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_b = __pyx_t_3; /* "thinc/neural/ops.pyx":1357 * cdef int i, b * for b in range(B): * for i in range(N): # <<<<<<<<<<<<<< * hf = gates[i*4+0] * hi = gates[i*4+1] */ __pyx_t_4 = __pyx_v_N; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "thinc/neural/ops.pyx":1358 * for b in range(B): * for i in range(N): * hf = gates[i*4+0] # <<<<<<<<<<<<<< * hi = gates[i*4+1] * ho = gates[i*4+2] */ __pyx_v_hf = 
(__pyx_v_gates[((__pyx_v_i * 4) + 0)]); /* "thinc/neural/ops.pyx":1359 * for i in range(N): * hf = gates[i*4+0] * hi = gates[i*4+1] # <<<<<<<<<<<<<< * ho = gates[i*4+2] * hc = gates[i*4+3] */ __pyx_v_hi = (__pyx_v_gates[((__pyx_v_i * 4) + 1)]); /* "thinc/neural/ops.pyx":1360 * hf = gates[i*4+0] * hi = gates[i*4+1] * ho = gates[i*4+2] # <<<<<<<<<<<<<< * hc = gates[i*4+3] * c = cells[i] */ __pyx_v_ho = (__pyx_v_gates[((__pyx_v_i * 4) + 2)]); /* "thinc/neural/ops.pyx":1361 * hi = gates[i*4+1] * ho = gates[i*4+2] * hc = gates[i*4+3] # <<<<<<<<<<<<<< * c = cells[i] * ct = tanhf(cells[i]) */ __pyx_v_hc = (__pyx_v_gates[((__pyx_v_i * 4) + 3)]); /* "thinc/neural/ops.pyx":1362 * ho = gates[i*4+2] * hc = gates[i*4+3] * c = cells[i] # <<<<<<<<<<<<<< * ct = tanhf(cells[i]) * dh = d_output[i] */ __pyx_v_c = (__pyx_v_cells[__pyx_v_i]); /* "thinc/neural/ops.pyx":1363 * hc = gates[i*4+3] * c = cells[i] * ct = tanhf(cells[i]) # <<<<<<<<<<<<<< * dh = d_output[i] * # Gradient for ho and c in h = sigmoid(ho) * tanh(c) */ __pyx_v_ct = tanhf((__pyx_v_cells[__pyx_v_i])); /* "thinc/neural/ops.pyx":1364 * c = cells[i] * ct = tanhf(cells[i]) * dh = d_output[i] # <<<<<<<<<<<<<< * # Gradient for ho and c in h = sigmoid(ho) * tanh(c) * dho = ct * dh * dsigmoid(ho) */ __pyx_v_dh = (__pyx_v_d_output[__pyx_v_i]); /* "thinc/neural/ops.pyx":1366 * dh = d_output[i] * # Gradient for ho and c in h = sigmoid(ho) * tanh(c) * dho = ct * dh * dsigmoid(ho) # <<<<<<<<<<<<<< * dc = ho * dh * dtanh(ct) * dc += d_cells[i] # Carry gradient from previous step */ __pyx_v_dho = ((__pyx_v_ct * __pyx_v_dh) * __pyx_f_5thinc_6neural_3ops_dsigmoid(__pyx_v_ho)); /* "thinc/neural/ops.pyx":1367 * # Gradient for ho and c in h = sigmoid(ho) * tanh(c) * dho = ct * dh * dsigmoid(ho) * dc = ho * dh * dtanh(ct) # <<<<<<<<<<<<<< * dc += d_cells[i] # Carry gradient from previous step * */ __pyx_v_dc = ((__pyx_v_ho * __pyx_v_dh) * __pyx_f_5thinc_6neural_3ops_dtanh(__pyx_v_ct)); /* "thinc/neural/ops.pyx":1368 * dho = ct * dh * dsigmoid(ho) * dc = ho * dh * dtanh(ct) * dc += d_cells[i] # Carry gradient from previous step # <<<<<<<<<<<<<< * * # Gradient for hf, hi, hc, prev[i] */ __pyx_v_dc = (__pyx_v_dc + (__pyx_v_d_cells[__pyx_v_i])); /* "thinc/neural/ops.pyx":1372 * # Gradient for hf, hi, hc, prev[i] * # in c = sigmoid(hf) * prev[i] + sigmoid(hi) * tanh(hc) * dhf = dsigmoid(hf) * dc * prev[i] # <<<<<<<<<<<<<< * dhi = dsigmoid(hi) * dc * hc * dhc = dtanh(hc) * dc * hi */ __pyx_v_dhf = ((__pyx_f_5thinc_6neural_3ops_dsigmoid(__pyx_v_hf) * __pyx_v_dc) * (__pyx_v_prev[__pyx_v_i])); /* "thinc/neural/ops.pyx":1373 * # in c = sigmoid(hf) * prev[i] + sigmoid(hi) * tanh(hc) * dhf = dsigmoid(hf) * dc * prev[i] * dhi = dsigmoid(hi) * dc * hc # <<<<<<<<<<<<<< * dhc = dtanh(hc) * dc * hi * dprev = dc * hf */ __pyx_v_dhi = ((__pyx_f_5thinc_6neural_3ops_dsigmoid(__pyx_v_hi) * __pyx_v_dc) * __pyx_v_hc); /* "thinc/neural/ops.pyx":1374 * dhf = dsigmoid(hf) * dc * prev[i] * dhi = dsigmoid(hi) * dc * hc * dhc = dtanh(hc) * dc * hi # <<<<<<<<<<<<<< * dprev = dc * hf * */ __pyx_v_dhc = ((__pyx_f_5thinc_6neural_3ops_dtanh(__pyx_v_hc) * __pyx_v_dc) * __pyx_v_hi); /* "thinc/neural/ops.pyx":1375 * dhi = dsigmoid(hi) * dc * hc * dhc = dtanh(hc) * dc * hi * dprev = dc * hf # <<<<<<<<<<<<<< * * d_gates[i*4+0] = dhf */ __pyx_v_dprev = (__pyx_v_dc * __pyx_v_hf); /* "thinc/neural/ops.pyx":1377 * dprev = dc * hf * * d_gates[i*4+0] = dhf # <<<<<<<<<<<<<< * d_gates[i*4+1] = dhi * d_gates[i*4+2] = dho */ (__pyx_v_d_gates[((__pyx_v_i * 4) + 0)]) = __pyx_v_dhf; /* "thinc/neural/ops.pyx":1378 * 
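*/
/* Note, at this point in cpu_lstm_gates_bwd: the values written out here and
   just below are the chain-rule gradients for h = ho * tanh(c) and
   c = hf * prev + hi * hc, expressed in terms of the activated gate values
   saved by the forward pass:
       dho   = tanh(c) * dh * dsigmoid(ho)
       dc    = ho * dh * dtanh(tanh(c)) + d_cells[i]   (carried-in gradient)
       dhf   = dsigmoid(hf) * dc * prev[i]
       dhi   = dsigmoid(hi) * dc * hc
       dhc   = dtanh(hc) * dc * hi
       dprev = dc * hf
   dprev is the gradient that flows back to the previous step's cell state. */
/*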
* d_gates[i*4+0] = dhf * d_gates[i*4+1] = dhi # <<<<<<<<<<<<<< * d_gates[i*4+2] = dho * d_gates[i*4+3] = dhc */ (__pyx_v_d_gates[((__pyx_v_i * 4) + 1)]) = __pyx_v_dhi; /* "thinc/neural/ops.pyx":1379 * d_gates[i*4+0] = dhf * d_gates[i*4+1] = dhi * d_gates[i*4+2] = dho # <<<<<<<<<<<<<< * d_gates[i*4+3] = dhc * d_cells[i] = dc */ (__pyx_v_d_gates[((__pyx_v_i * 4) + 2)]) = __pyx_v_dho; /* "thinc/neural/ops.pyx":1380 * d_gates[i*4+1] = dhi * d_gates[i*4+2] = dho * d_gates[i*4+3] = dhc # <<<<<<<<<<<<<< * d_cells[i] = dc * d_prev[i] = dprev */ (__pyx_v_d_gates[((__pyx_v_i * 4) + 3)]) = __pyx_v_dhc; /* "thinc/neural/ops.pyx":1381 * d_gates[i*4+2] = dho * d_gates[i*4+3] = dhc * d_cells[i] = dc # <<<<<<<<<<<<<< * d_prev[i] = dprev * d_cells += N */ (__pyx_v_d_cells[__pyx_v_i]) = __pyx_v_dc; /* "thinc/neural/ops.pyx":1382 * d_gates[i*4+3] = dhc * d_cells[i] = dc * d_prev[i] = dprev # <<<<<<<<<<<<<< * d_cells += N * d_prev += N */ (__pyx_v_d_prev[__pyx_v_i]) = __pyx_v_dprev; } /* "thinc/neural/ops.pyx":1383 * d_cells[i] = dc * d_prev[i] = dprev * d_cells += N # <<<<<<<<<<<<<< * d_prev += N * d_output += N */ __pyx_v_d_cells = (__pyx_v_d_cells + __pyx_v_N); /* "thinc/neural/ops.pyx":1384 * d_prev[i] = dprev * d_cells += N * d_prev += N # <<<<<<<<<<<<<< * d_output += N * d_gates += N*4 */ __pyx_v_d_prev = (__pyx_v_d_prev + __pyx_v_N); /* "thinc/neural/ops.pyx":1385 * d_cells += N * d_prev += N * d_output += N # <<<<<<<<<<<<<< * d_gates += N*4 * gates += N*4 */ __pyx_v_d_output = (__pyx_v_d_output + __pyx_v_N); /* "thinc/neural/ops.pyx":1386 * d_prev += N * d_output += N * d_gates += N*4 # <<<<<<<<<<<<<< * gates += N*4 * cells += N */ __pyx_v_d_gates = (__pyx_v_d_gates + (__pyx_v_N * 4)); /* "thinc/neural/ops.pyx":1387 * d_output += N * d_gates += N*4 * gates += N*4 # <<<<<<<<<<<<<< * cells += N * prev += N */ __pyx_v_gates = (__pyx_v_gates + (__pyx_v_N * 4)); /* "thinc/neural/ops.pyx":1388 * d_gates += N*4 * gates += N*4 * cells += N # <<<<<<<<<<<<<< * prev += N * */ __pyx_v_cells = (__pyx_v_cells + __pyx_v_N); /* "thinc/neural/ops.pyx":1389 * gates += N*4 * cells += N * prev += N # <<<<<<<<<<<<<< * * */ __pyx_v_prev = (__pyx_v_prev + __pyx_v_N); } /* "thinc/neural/ops.pyx":1351 * * * cdef void cpu_lstm_gates_bwd(float* d_cells, float* d_prev, float* d_gates, # <<<<<<<<<<<<<< * const float* d_output, const float* gates, const float* cells, * const float* prev, int B, int N) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.neural.ops.cpu_lstm_gates_bwd", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":258 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fulfill the PEP. 
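*/
/* The functions below are generated from Cython's bundled numpy/__init__.pxd
   (see the banner paths) rather than from thinc/neural/ops.pyx: the ndarray
   __getbuffer__/__releasebuffer__ implementation of the PEP 3118 buffer
   protocol, followed by the PyArray_MultiIterNew1-5 and PyDataType_SHAPE
   helpers. For orientation, a consumer of the buffer protocol on the C side
   looks roughly like this (illustrative sketch using the standard CPython
   API, not code from this module):

       Py_buffer view;
       if (PyObject_GetBuffer(obj, &view, PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) == 0) {
           // view.buf points at the data; view.ndim, view.shape, view.strides,
           // view.itemsize and view.format describe its layout.
           PyBuffer_Release(&view);
       }
*/
/*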
*/ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; PyArray_Descr *__pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); __Pyx_TraceCall("__getbuffer__", __pyx_f[1], 258, 0, __PYX_ERR(1, 258, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 * * cdef int i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":266 * cdef int i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":268 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":271 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; /* 
"../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (unlikely(__pyx_t_1)) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__119, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 272, __pyx_L1_error) /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L7_bool_binop_done:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (unlikely(__pyx_t_1)) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__120, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 276, __pyx_L1_error) /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if 
((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 * # This is allocated as one block, strides first. 
* info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285 * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
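*/
/* Note: when npy_intp and Py_ssize_t differ in size, the ndarray's own
   dimension arrays cannot be handed out directly, so a single block of
   2 * ndim Py_ssize_t values is allocated here with strides first and shape
   immediately after it. __releasebuffer__ below frees only info.strides for
   the same reason. */
/*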
*/ goto __pyx_L9; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L9:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = PyArray_DESCR(self) * cdef int offset */ __pyx_v_f = NULL; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 * cdef int t * cdef char* f = NULL * cdef dtype descr = PyArray_DESCR(self) # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_7 = PyArray_DESCR(__pyx_v_self); __pyx_t_3 = ((PyObject *)__pyx_t_7); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300 * cdef int offset * * info.obj = self # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(descr): */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.obj = self * * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(PyDataType_HASFIELDS(__pyx_v_descr) != 0)) != 0); if (__pyx_t_1) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":303 * * if not PyDataType_HASFIELDS(descr): * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* 
"../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L15_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L14_bool_binop_done; } __pyx_L15_next_or:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L14_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L14_bool_binop_done:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (unlikely(__pyx_t_1)) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":306 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__121, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 306, __pyx_L1_error) /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; case NPY_UBYTE: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_v_f = ((char *)"B"); break; case NPY_SHORT: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # 
<<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_v_f = ((char *)"h"); break; case NPY_USHORT: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_v_f = ((char *)"H"); break; case NPY_INT: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_v_f = ((char *)"i"); break; case NPY_UINT: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":312 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_v_f = ((char *)"I"); break; case NPY_LONG: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":313 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_v_f = ((char *)"l"); break; case NPY_ULONG: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":314 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_v_f = ((char *)"L"); break; case NPY_LONGLONG: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":315 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_v_f = ((char *)"q"); break; case NPY_ULONGLONG: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":316 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_v_f = ((char *)"Q"); break; case NPY_FLOAT: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":317 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_v_f = ((char *)"f"); break; case NPY_DOUBLE: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":318 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_v_f = ((char *)"d"); break; case NPY_LONGDOUBLE: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":319 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_v_f = ((char *)"g"); break; case 
NPY_CFLOAT: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":320 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_v_f = ((char *)"Zf"); break; case NPY_CDOUBLE: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":321 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_v_f = ((char *)"Zd"); break; case NPY_CLONGDOUBLE: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":322 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_v_f = ((char *)"Zg"); break; case NPY_OBJECT: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":323 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_v_f = ((char *)"O"); break; default: /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":325 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 325, __pyx_L1_error) break; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":326 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":327 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = PyObject_Malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.obj = self * * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":329 * return * else: * info.format = PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); /* 
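*/
/* Note: this is the structured-dtype path of __getbuffer__. A 255-byte buffer
   (_buffer_format_string_len in the pxd, 0xFF here) is allocated for the
   format string, its first character is set to '^' ("Native data types,
   manual alignment", per the pxd comment), and _util_dtypestring then writes
   the field-by-field format codes after it. Simple dtypes instead use the
   one-character codes chosen in the switch above. */
/*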
"../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":330 * else: * info.format = PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":331 * info.format = PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":332 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 332, __pyx_L1_error) __pyx_v_f = __pyx_t_9; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":335 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":258 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fulfill the PEP. 
*/ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":337 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__releasebuffer__", 0); __Pyx_TraceCall("__releasebuffer__", __pyx_f[1], 337, 0, __PYX_ERR(1, 337, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":338 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":339 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) */ PyObject_Free(__pyx_v_info->format); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":338 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":340 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":341 * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) # <<<<<<<<<<<<<< * # info.shape was 
stored after info.strides in the same block * */ PyObject_Free(__pyx_v_info->strides); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":340 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":337 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("numpy.ndarray.__releasebuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":820 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); __Pyx_TraceCall("PyArray_MultiIterNew1", __pyx_f[1], 820, 0, __PYX_ERR(1, 820, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":821 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 821, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":820 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); __Pyx_TraceCall("PyArray_MultiIterNew2", __pyx_f[1], 823, 0, __PYX_ERR(1, 823, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":824 * * cdef inline object PyArray_MultiIterNew2(a, b): * return 
PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); __Pyx_TraceCall("PyArray_MultiIterNew3", __pyx_f[1], 826, 0, __PYX_ERR(1, 826, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":829 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); __Pyx_TraceCall("PyArray_MultiIterNew4", __pyx_f[1], 829, 0, __PYX_ERR(1, 829, __pyx_L1_error)); /* 
"../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":830 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":829 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":832 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); __Pyx_TraceCall("PyArray_MultiIterNew5", __pyx_f[1], 832, 0, __PYX_ERR(1, 832, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":833 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline tuple PyDataType_SHAPE(dtype d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":832 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":835 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations 
__Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); __Pyx_TraceCall("PyDataType_SHAPE", __pyx_f[1], 835, 0, __PYX_ERR(1, 835, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":836 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return d.subarray.shape * else: */ __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); if (__pyx_t_1) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape # <<<<<<<<<<<<<< * else: * return () */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); goto __pyx_L0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":836 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return d.subarray.shape * else: */ } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839 * return d.subarray.shape * else: * return () # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_empty_tuple); __pyx_r = __pyx_empty_tuple; goto __pyx_L0; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":835 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return d.subarray.shape */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("numpy.PyDataType_SHAPE", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":841 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); __Pyx_TraceCall("_util_dtypestring", __pyx_f[1], 841, 0, __PYX_ERR(1, 841, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 850, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 850, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 851, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - 
offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 852, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 852, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":854 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 854, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (unlikely(__pyx_t_6)) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__122, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 855, __pyx_L1_error) /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":854 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto 
__pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (unlikely(__pyx_t_6)) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__121, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 859, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 859, __pyx_L1_error) /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":869 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 869, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 869, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 869, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":870 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":871 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":872 * f[0] = 120 # "x"; pad byte * 
f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":874 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":876 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":877 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 877, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":878 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (unlikely(__pyx_t_6)) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":879 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__123, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 879, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 879, __pyx_L1_error) /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":878 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":882 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 882, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 882, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 882, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":883 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: 
f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 883, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 883, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 883, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":884 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":885 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 885, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 885, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 885, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":886 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 886, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 886, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 886, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":887 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ 
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 887, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 887, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 887, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":888 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 888, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 888, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 888, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":889 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 889, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 889, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 889, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":890 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":891 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 891, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 891, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 891, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":892 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 892, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 892, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 892, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":893 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 893, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 893, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 893, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":894 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 894, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 894, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 894, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":895 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f 
+= 1 # Zg */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 895, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 895, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 895, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":896 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 896, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 896, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":897 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 897, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 897, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 897, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":898 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 898, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 898, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 898, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (likely(__pyx_t_6)) { (__pyx_v_f[0]) = 79; goto __pyx_L15; } /* 
"../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":900 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 900, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 900, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 900, __pyx_L1_error) } __pyx_L15:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":901 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":876 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":905 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 905, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":906 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":841 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * int _import_umath() except -1 * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * Py_INCREF(base) # important to do this before stealing the reference below! 
* PyArray_SetBaseObject(arr, base) */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("set_array_base", 0); __Pyx_TraceCall("set_array_base", __pyx_f[1], 1021, 0, __PYX_ERR(1, 1021, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * * cdef inline void set_array_base(ndarray arr, object base): * Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<< * PyArray_SetBaseObject(arr, base) * */ Py_INCREF(__pyx_v_base); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023 * cdef inline void set_array_base(ndarray arr, object base): * Py_INCREF(base) # important to do this before stealing the reference below! * PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 * int _import_umath() except -1 * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * Py_INCREF(base) # important to do this before stealing the reference below! * PyArray_SetBaseObject(arr, base) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("numpy.set_array_base", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 * PyArray_SetBaseObject(arr, base) * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * base = PyArray_BASE(arr) * if base is NULL: */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_v_base; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_array_base", 0); __Pyx_TraceCall("get_array_base", __pyx_f[1], 1025, 0, __PYX_ERR(1, 1025, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1026 * * cdef inline object get_array_base(ndarray arr): * base = PyArray_BASE(arr) # <<<<<<<<<<<<<< * if base is NULL: * return None */ __pyx_v_base = PyArray_BASE(__pyx_v_arr); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1027 * cdef inline object get_array_base(ndarray arr): * base = PyArray_BASE(arr) * if base is NULL: # <<<<<<<<<<<<<< * return None * return base */ __pyx_t_1 = ((__pyx_v_base == NULL) != 0); if (__pyx_t_1) { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1028 * base = PyArray_BASE(arr) * if base is NULL: * return None # <<<<<<<<<<<<<< * return base * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1027 * cdef inline object get_array_base(ndarray arr): * base = PyArray_BASE(arr) * if base is NULL: # 
<<<<<<<<<<<<<< * return None * return base */ } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1029 * if base is NULL: * return None * return base # <<<<<<<<<<<<<< * * # Versions of the import_* functions which are more suitable for */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_base)); __pyx_r = ((PyObject *)__pyx_v_base); goto __pyx_L0; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 * PyArray_SetBaseObject(arr, base) * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * base = PyArray_BASE(arr) * if base is NULL: */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("numpy.get_array_base", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1033 * # Versions of the import_* functions which are more suitable for * # Cython code. * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("import_array", 0); __Pyx_TraceCall("import_array", __pyx_f[1], 1033, 0, __PYX_ERR(1, 1033, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1034 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1035 * cdef inline int import_array() except -1: * try: * _import_array() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.multiarray failed to import") */ __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1035, __pyx_L3_error) /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1034 * # Cython code. 
* cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1036 * try: * _import_array() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.multiarray failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1036, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1037 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__124, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1037, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1037, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1034 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1033 * # Versions of the import_* functions which are more suitable for * # Cython code. 
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1039 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("import_umath", 0); __Pyx_TraceCall("import_umath", __pyx_f[1], 1039, 0, __PYX_ERR(1, 1039, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1040 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1041 * cdef inline int import_umath() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1041, __pyx_L3_error) /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1040 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1042 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1042, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1043 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__125, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1043, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); 
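/* Illustrative note (not emitted by Cython): the import_array/import_umath/
 * import_ufunc wrappers call NumPy's C-level importers and translate any
 * failure into an ImportError with a fixed message. A minimal hand-written
 * equivalent of that pattern, using only the public CPython/NumPy API, might
 * look like the disabled sketch below; the function name is hypothetical and
 * the block is excluded from compilation. */
#if 0
#include <Python.h>
#include <numpy/arrayobject.h>
#include <numpy/ufuncobject.h>

/* Initialise the NumPy C API, converting a low-level failure (-1 return)
 * into an ImportError, as the generated helpers above do. Returns 0 on
 * success, -1 with an exception set on failure. */
static int
example_init_numpy(void)
{
    if (_import_array() < 0) {
        PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
        return -1;
    }
    if (_import_umath() < 0) {
        PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import");
        return -1;
    }
    return 0;
}
#endif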
__Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1043, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1040 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1039 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1045 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("import_ufunc", 0); __Pyx_TraceCall("import_ufunc", __pyx_f[1], 1045, 0, __PYX_ERR(1, 1045, __pyx_L1_error)); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1046 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1047 * cdef inline int import_ufunc() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1047, __pyx_L3_error) /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1046 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1048 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if 
(__pyx_t_4) { __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1048, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1049 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__125, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1049, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1049, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1046 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1045 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "linalg.pxd":31 * cdef class Vec: * @staticmethod * cdef inline int arg_max(const weight_t* scores, const int n_classes) nogil: # <<<<<<<<<<<<<< * if n_classes == 2: * return 0 if scores[0] > scores[1] else 1 */ static CYTHON_INLINE int __pyx_f_5thinc_6linalg_3Vec_arg_max(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_scores, int const __pyx_v_n_classes) { int __pyx_v_i; int __pyx_v_best; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_mode; int __pyx_r; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("arg_max", __pyx_f[2], 31, 1, __PYX_ERR(2, 31, __pyx_L1_error)); /* "linalg.pxd":32 * @staticmethod * cdef inline int arg_max(const weight_t* scores, const int n_classes) nogil: * if n_classes == 2: # <<<<<<<<<<<<<< * return 0 if scores[0] > scores[1] else 1 * cdef int i */ __pyx_t_1 = ((__pyx_v_n_classes == 2) != 0); if (__pyx_t_1) { /* "linalg.pxd":33 * cdef inline int arg_max(const weight_t* scores, const int n_classes) nogil: * if n_classes == 2: * return 0 if scores[0] > scores[1] else 1 # <<<<<<<<<<<<<< * cdef int i * cdef int best = 0 */ if ((((__pyx_v_scores[0]) > (__pyx_v_scores[1])) != 0)) { __pyx_t_2 = 0; } else { __pyx_t_2 = 1; } __pyx_r = __pyx_t_2; goto __pyx_L0; /* "linalg.pxd":32 * @staticmethod * cdef inline int arg_max(const weight_t* scores, const int n_classes) nogil: * if n_classes == 2: # <<<<<<<<<<<<<< * return 0 if scores[0] > scores[1] else 1 * cdef int i */ } /* "linalg.pxd":35 * return 0 if scores[0] > scores[1] else 1 * cdef int i * cdef int best = 0 # <<<<<<<<<<<<<< * cdef weight_t 
mode = scores[0] * for i in range(1, n_classes): */ __pyx_v_best = 0; /* "linalg.pxd":36 * cdef int i * cdef int best = 0 * cdef weight_t mode = scores[0] # <<<<<<<<<<<<<< * for i in range(1, n_classes): * if scores[i] > mode: */ __pyx_v_mode = (__pyx_v_scores[0]); /* "linalg.pxd":37 * cdef int best = 0 * cdef weight_t mode = scores[0] * for i in range(1, n_classes): # <<<<<<<<<<<<<< * if scores[i] > mode: * mode = scores[i] */ __pyx_t_2 = __pyx_v_n_classes; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 1; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "linalg.pxd":38 * cdef weight_t mode = scores[0] * for i in range(1, n_classes): * if scores[i] > mode: # <<<<<<<<<<<<<< * mode = scores[i] * best = i */ __pyx_t_1 = (((__pyx_v_scores[__pyx_v_i]) > __pyx_v_mode) != 0); if (__pyx_t_1) { /* "linalg.pxd":39 * for i in range(1, n_classes): * if scores[i] > mode: * mode = scores[i] # <<<<<<<<<<<<<< * best = i * return best */ __pyx_v_mode = (__pyx_v_scores[__pyx_v_i]); /* "linalg.pxd":40 * if scores[i] > mode: * mode = scores[i] * best = i # <<<<<<<<<<<<<< * return best * */ __pyx_v_best = __pyx_v_i; /* "linalg.pxd":38 * cdef weight_t mode = scores[0] * for i in range(1, n_classes): * if scores[i] > mode: # <<<<<<<<<<<<<< * mode = scores[i] * best = i */ } } /* "linalg.pxd":41 * mode = scores[i] * best = i * return best # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_r = __pyx_v_best; goto __pyx_L0; /* "linalg.pxd":31 * cdef class Vec: * @staticmethod * cdef inline int arg_max(const weight_t* scores, const int n_classes) nogil: # <<<<<<<<<<<<<< * if n_classes == 2: * return 0 if scores[0] > scores[1] else 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.arg_max", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "linalg.pxd":44 * * @staticmethod * cdef inline weight_t max(const weight_t* x, int32_t nr) nogil: # <<<<<<<<<<<<<< * if nr == 0: * return 0 */ static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_max(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_x, int32_t __pyx_v_nr) { int __pyx_v_i; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_mode; __pyx_t_5thinc_8typedefs_weight_t __pyx_r; __Pyx_TraceDeclarations int __pyx_t_1; int32_t __pyx_t_2; int32_t __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("max", __pyx_f[2], 44, 1, __PYX_ERR(2, 44, __pyx_L1_error)); /* "linalg.pxd":45 * @staticmethod * cdef inline weight_t max(const weight_t* x, int32_t nr) nogil: * if nr == 0: # <<<<<<<<<<<<<< * return 0 * cdef int i */ __pyx_t_1 = ((__pyx_v_nr == 0) != 0); if (__pyx_t_1) { /* "linalg.pxd":46 * cdef inline weight_t max(const weight_t* x, int32_t nr) nogil: * if nr == 0: * return 0 # <<<<<<<<<<<<<< * cdef int i * cdef weight_t mode = x[0] */ __pyx_r = 0.0; goto __pyx_L0; /* "linalg.pxd":45 * @staticmethod * cdef inline weight_t max(const weight_t* x, int32_t nr) nogil: * if nr == 0: # <<<<<<<<<<<<<< * return 0 * cdef int i */ } /* "linalg.pxd":48 * return 0 * cdef int i * cdef weight_t mode = x[0] # <<<<<<<<<<<<<< * for i in range(1, nr): * if x[i] > mode: */ __pyx_v_mode = (__pyx_v_x[0]); /* "linalg.pxd":49 * cdef int i * cdef weight_t mode = x[0] * for i in range(1, nr): # <<<<<<<<<<<<<< * if x[i] > mode: * mode = x[i] */ __pyx_t_2 = __pyx_v_nr; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 1; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "linalg.pxd":50 * cdef 
weight_t mode = x[0] * for i in range(1, nr): * if x[i] > mode: # <<<<<<<<<<<<<< * mode = x[i] * return mode */ __pyx_t_1 = (((__pyx_v_x[__pyx_v_i]) > __pyx_v_mode) != 0); if (__pyx_t_1) { /* "linalg.pxd":51 * for i in range(1, nr): * if x[i] > mode: * mode = x[i] # <<<<<<<<<<<<<< * return mode * */ __pyx_v_mode = (__pyx_v_x[__pyx_v_i]); /* "linalg.pxd":50 * cdef weight_t mode = x[0] * for i in range(1, nr): * if x[i] > mode: # <<<<<<<<<<<<<< * mode = x[i] * return mode */ } } /* "linalg.pxd":52 * if x[i] > mode: * mode = x[i] * return mode # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_r = __pyx_v_mode; goto __pyx_L0; /* "linalg.pxd":44 * * @staticmethod * cdef inline weight_t max(const weight_t* x, int32_t nr) nogil: # <<<<<<<<<<<<<< * if nr == 0: * return 0 */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.max", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "linalg.pxd":55 * * @staticmethod * cdef inline weight_t sum(const weight_t* vec, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * cdef weight_t total = 0 */ static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_sum(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_vec, int32_t __pyx_v_nr) { int __pyx_v_i; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_total; __pyx_t_5thinc_8typedefs_weight_t __pyx_r; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("sum", __pyx_f[2], 55, 1, __PYX_ERR(2, 55, __pyx_L1_error)); /* "linalg.pxd":57 * cdef inline weight_t sum(const weight_t* vec, int32_t nr) nogil: * cdef int i * cdef weight_t total = 0 # <<<<<<<<<<<<<< * for i in range(nr): * total += vec[i] */ __pyx_v_total = 0.0; /* "linalg.pxd":58 * cdef int i * cdef weight_t total = 0 * for i in range(nr): # <<<<<<<<<<<<<< * total += vec[i] * return total */ __pyx_t_1 = __pyx_v_nr; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":59 * cdef weight_t total = 0 * for i in range(nr): * total += vec[i] # <<<<<<<<<<<<<< * return total * */ __pyx_v_total = (__pyx_v_total + (__pyx_v_vec[__pyx_v_i])); } /* "linalg.pxd":60 * for i in range(nr): * total += vec[i] * return total # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_r = __pyx_v_total; goto __pyx_L0; /* "linalg.pxd":55 * * @staticmethod * cdef inline weight_t sum(const weight_t* vec, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * cdef weight_t total = 0 */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.sum", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "linalg.pxd":63 * * @staticmethod * cdef inline weight_t norm(const weight_t* vec, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef weight_t total = 0 * for i in range(nr): */ static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_norm(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_vec, int32_t __pyx_v_nr) { __pyx_t_5thinc_8typedefs_weight_t __pyx_v_total; int32_t __pyx_v_i; __pyx_t_5thinc_8typedefs_weight_t __pyx_r; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int32_t __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("norm", __pyx_f[2], 63, 1, __PYX_ERR(2, 63, __pyx_L1_error)); /* "linalg.pxd":64 * @staticmethod * 
cdef inline weight_t norm(const weight_t* vec, int32_t nr) nogil: * cdef weight_t total = 0 # <<<<<<<<<<<<<< * for i in range(nr): * total += vec[i] ** 2 */ __pyx_v_total = 0.0; /* "linalg.pxd":65 * cdef inline weight_t norm(const weight_t* vec, int32_t nr) nogil: * cdef weight_t total = 0 * for i in range(nr): # <<<<<<<<<<<<<< * total += vec[i] ** 2 * return sqrt(total) */ __pyx_t_1 = __pyx_v_nr; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":66 * cdef weight_t total = 0 * for i in range(nr): * total += vec[i] ** 2 # <<<<<<<<<<<<<< * return sqrt(total) * */ __pyx_v_total = (__pyx_v_total + powf(((__pyx_t_5thinc_8typedefs_weight_t)(__pyx_v_vec[__pyx_v_i])), 2.0)); } /* "linalg.pxd":67 * for i in range(nr): * total += vec[i] ** 2 * return sqrt(total) # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_r = sqrt(__pyx_v_total); goto __pyx_L0; /* "linalg.pxd":63 * * @staticmethod * cdef inline weight_t norm(const weight_t* vec, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef weight_t total = 0 * for i in range(nr): */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.norm", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "linalg.pxd":70 * * @staticmethod * cdef inline void add(weight_t* output, const weight_t* x, # <<<<<<<<<<<<<< * weight_t inc, int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_add(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_inc, int32_t __pyx_v_nr) { __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("add", __pyx_f[2], 70, 1, __PYX_ERR(2, 70, __pyx_L1_error)); /* "linalg.pxd":72 * cdef inline void add(weight_t* output, const weight_t* x, * weight_t inc, int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) # <<<<<<<<<<<<<< * Vec.add_i(output, inc, nr) * */ (void)(memcpy(__pyx_v_output, __pyx_v_x, ((sizeof((__pyx_v_output[0]))) * __pyx_v_nr))); /* "linalg.pxd":73 * weight_t inc, int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) * Vec.add_i(output, inc, nr) # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_f_5thinc_6linalg_3Vec_add_i(__pyx_v_output, __pyx_v_inc, __pyx_v_nr); /* "linalg.pxd":70 * * @staticmethod * cdef inline void add(weight_t* output, const weight_t* x, # <<<<<<<<<<<<<< * weight_t inc, int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.add", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":76 * * @staticmethod * cdef inline void add_i(weight_t* vec, weight_t inc, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * for i in range(nr): */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_add_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_inc, int32_t __pyx_v_nr) { int __pyx_v_i; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("add_i", __pyx_f[2], 76, 1, __PYX_ERR(2, 76, __pyx_L1_error)); /* "linalg.pxd":78 * cdef inline void add_i(weight_t* vec, weight_t inc, int32_t nr) nogil: * cdef 
int i * for i in range(nr): # <<<<<<<<<<<<<< * vec[i] += inc * */ __pyx_t_1 = __pyx_v_nr; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":79 * cdef int i * for i in range(nr): * vec[i] += inc # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_t_4 = __pyx_v_i; (__pyx_v_vec[__pyx_t_4]) = ((__pyx_v_vec[__pyx_t_4]) + __pyx_v_inc); } /* "linalg.pxd":76 * * @staticmethod * cdef inline void add_i(weight_t* vec, weight_t inc, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * for i in range(nr): */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.add_i", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":82 * * @staticmethod * cdef inline void mul(weight_t* output, const weight_t* vec, weight_t scal, # <<<<<<<<<<<<<< * int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_mul(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scal, int32_t __pyx_v_nr) { __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("mul", __pyx_f[2], 82, 1, __PYX_ERR(2, 82, __pyx_L1_error)); /* "linalg.pxd":84 * cdef inline void mul(weight_t* output, const weight_t* vec, weight_t scal, * int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) # <<<<<<<<<<<<<< * Vec.mul_i(output, scal, nr) * */ (void)(memcpy(__pyx_v_output, __pyx_v_vec, ((sizeof((__pyx_v_output[0]))) * __pyx_v_nr))); /* "linalg.pxd":85 * int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) * Vec.mul_i(output, scal, nr) # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_f_5thinc_6linalg_3Vec_mul_i(__pyx_v_output, __pyx_v_scal, __pyx_v_nr); /* "linalg.pxd":82 * * @staticmethod * cdef inline void mul(weight_t* output, const weight_t* vec, weight_t scal, # <<<<<<<<<<<<<< * int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.mul", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":88 * * @staticmethod * cdef inline void mul_i(weight_t* vec, weight_t scal, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * IF USE_BLAS: */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_mul_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scal, int32_t __pyx_v_nr) { int __pyx_v_i; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("mul_i", __pyx_f[2], 88, 1, __PYX_ERR(2, 88, __pyx_L1_error)); /* "linalg.pxd":93 * blis.cy.scalv(BLIS_NO_CONJUGATE, nr, scal, vec, 1) * ELSE: * for i in range(nr): # <<<<<<<<<<<<<< * vec[i] *= scal * */ __pyx_t_1 = __pyx_v_nr; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":94 * ELSE: * for i in range(nr): * vec[i] *= scal # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_t_4 = __pyx_v_i; (__pyx_v_vec[__pyx_t_4]) = ((__pyx_v_vec[__pyx_t_4]) * __pyx_v_scal); } /* "linalg.pxd":88 * * @staticmethod * cdef inline void mul_i(weight_t* vec, weight_t scal, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * IF USE_BLAS: */ /* 
function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.mul_i", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":97 * * @staticmethod * cdef inline void pow(weight_t* output, const weight_t* vec, weight_t scal, # <<<<<<<<<<<<<< * int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_pow(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scal, int32_t __pyx_v_nr) { __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("pow", __pyx_f[2], 97, 1, __PYX_ERR(2, 97, __pyx_L1_error)); /* "linalg.pxd":99 * cdef inline void pow(weight_t* output, const weight_t* vec, weight_t scal, * int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) # <<<<<<<<<<<<<< * Vec.pow_i(output, scal, nr) * */ (void)(memcpy(__pyx_v_output, __pyx_v_vec, ((sizeof((__pyx_v_output[0]))) * __pyx_v_nr))); /* "linalg.pxd":100 * int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) * Vec.pow_i(output, scal, nr) # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_f_5thinc_6linalg_3Vec_pow_i(__pyx_v_output, __pyx_v_scal, __pyx_v_nr); /* "linalg.pxd":97 * * @staticmethod * cdef inline void pow(weight_t* output, const weight_t* vec, weight_t scal, # <<<<<<<<<<<<<< * int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.pow", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":103 * * @staticmethod * cdef inline void pow_i(weight_t* vec, const weight_t scal, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * for i in range(nr): */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_pow_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t const __pyx_v_scal, int32_t __pyx_v_nr) { int __pyx_v_i; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("pow_i", __pyx_f[2], 103, 1, __PYX_ERR(2, 103, __pyx_L1_error)); /* "linalg.pxd":105 * cdef inline void pow_i(weight_t* vec, const weight_t scal, int32_t nr) nogil: * cdef int i * for i in range(nr): # <<<<<<<<<<<<<< * vec[i] **= scal * */ __pyx_t_1 = __pyx_v_nr; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":106 * cdef int i * for i in range(nr): * vec[i] **= scal # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_t_4 = __pyx_v_i; (__pyx_v_vec[__pyx_t_4]) = powf((__pyx_v_vec[__pyx_t_4]), ((__pyx_t_5thinc_8typedefs_weight_t)__pyx_v_scal)); } /* "linalg.pxd":103 * * @staticmethod * cdef inline void pow_i(weight_t* vec, const weight_t scal, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * for i in range(nr): */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.pow_i", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":110 * @staticmethod * @cython.cdivision(True) * cdef inline void div(weight_t* output, const weight_t* vec, weight_t scal, # <<<<<<<<<<<<<< * int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) */ static CYTHON_INLINE void 
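/* Note on Vec.mul_i above: the .pxd source guards its loop with a compile-time
 * IF USE_BLAS / ELSE. Only the ELSE branch (the plain vec[i] *= scal loop) was
 * emitted into this generated C, which indicates USE_BLAS was false when this
 * module was cythonized; a BLAS-enabled build would instead emit the
 * blis.cy.scalv call quoted in the trace comment. The same holds for
 * VecVec.add_i further down, whose BLAS branch is blis.cy.axpyv.
 */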
__pyx_f_5thinc_6linalg_3Vec_div(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scal, int32_t __pyx_v_nr) { __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("div", __pyx_f[2], 110, 1, __PYX_ERR(2, 110, __pyx_L1_error)); /* "linalg.pxd":112 * cdef inline void div(weight_t* output, const weight_t* vec, weight_t scal, * int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) # <<<<<<<<<<<<<< * Vec.div_i(output, scal, nr) * */ (void)(memcpy(__pyx_v_output, __pyx_v_vec, ((sizeof((__pyx_v_output[0]))) * __pyx_v_nr))); /* "linalg.pxd":113 * int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) * Vec.div_i(output, scal, nr) # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_f_5thinc_6linalg_3Vec_div_i(__pyx_v_output, __pyx_v_scal, __pyx_v_nr); /* "linalg.pxd":110 * @staticmethod * @cython.cdivision(True) * cdef inline void div(weight_t* output, const weight_t* vec, weight_t scal, # <<<<<<<<<<<<<< * int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.div", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":117 * @staticmethod * @cython.cdivision(True) * cdef inline void div_i(weight_t* vec, const weight_t scal, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * for i in range(nr): */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_div_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_vec, __pyx_t_5thinc_8typedefs_weight_t const __pyx_v_scal, int32_t __pyx_v_nr) { int __pyx_v_i; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("div_i", __pyx_f[2], 117, 1, __PYX_ERR(2, 117, __pyx_L1_error)); /* "linalg.pxd":119 * cdef inline void div_i(weight_t* vec, const weight_t scal, int32_t nr) nogil: * cdef int i * for i in range(nr): # <<<<<<<<<<<<<< * vec[i] /= scal * */ __pyx_t_1 = __pyx_v_nr; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":120 * cdef int i * for i in range(nr): * vec[i] /= scal # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_t_4 = __pyx_v_i; (__pyx_v_vec[__pyx_t_4]) = ((__pyx_v_vec[__pyx_t_4]) / __pyx_v_scal); } /* "linalg.pxd":117 * @staticmethod * @cython.cdivision(True) * cdef inline void div_i(weight_t* vec, const weight_t scal, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * for i in range(nr): */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.div_i", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":123 * * @staticmethod * cdef inline void exp(weight_t* output, const weight_t* vec, int32_t nr) nogil: # <<<<<<<<<<<<<< * memcpy(output, vec, sizeof(output[0]) * nr) * Vec.exp_i(output, nr) */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_exp(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_vec, int32_t __pyx_v_nr) { __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("exp", __pyx_f[2], 123, 1, __PYX_ERR(2, 123, __pyx_L1_error)); /* "linalg.pxd":124 * @staticmethod * cdef inline void exp(weight_t* 
output, const weight_t* vec, int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) # <<<<<<<<<<<<<< * Vec.exp_i(output, nr) * */ (void)(memcpy(__pyx_v_output, __pyx_v_vec, ((sizeof((__pyx_v_output[0]))) * __pyx_v_nr))); /* "linalg.pxd":125 * cdef inline void exp(weight_t* output, const weight_t* vec, int32_t nr) nogil: * memcpy(output, vec, sizeof(output[0]) * nr) * Vec.exp_i(output, nr) # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_f_5thinc_6linalg_3Vec_exp_i(__pyx_v_output, __pyx_v_nr); /* "linalg.pxd":123 * * @staticmethod * cdef inline void exp(weight_t* output, const weight_t* vec, int32_t nr) nogil: # <<<<<<<<<<<<<< * memcpy(output, vec, sizeof(output[0]) * nr) * Vec.exp_i(output, nr) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.exp", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":128 * * @staticmethod * cdef inline void exp_i(weight_t* vec, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * for i in range(nr): */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_exp_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_vec, int32_t __pyx_v_nr) { int __pyx_v_i; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("exp_i", __pyx_f[2], 128, 1, __PYX_ERR(2, 128, __pyx_L1_error)); /* "linalg.pxd":130 * cdef inline void exp_i(weight_t* vec, int32_t nr) nogil: * cdef int i * for i in range(nr): # <<<<<<<<<<<<<< * vec[i] = exp(vec[i]) * */ __pyx_t_1 = __pyx_v_nr; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":131 * cdef int i * for i in range(nr): * vec[i] = exp(vec[i]) # <<<<<<<<<<<<<< * * @staticmethod */ (__pyx_v_vec[__pyx_v_i]) = exp((__pyx_v_vec[__pyx_v_i])); } /* "linalg.pxd":128 * * @staticmethod * cdef inline void exp_i(weight_t* vec, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * for i in range(nr): */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.exp_i", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":134 * * @staticmethod * cdef inline void reciprocal_i(weight_t* vec, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * for i in range(nr): */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Vec_reciprocal_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_vec, int32_t __pyx_v_nr) { int __pyx_v_i; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("reciprocal_i", __pyx_f[2], 134, 1, __PYX_ERR(2, 134, __pyx_L1_error)); /* "linalg.pxd":136 * cdef inline void reciprocal_i(weight_t* vec, int32_t nr) nogil: * cdef int i * for i in range(nr): # <<<<<<<<<<<<<< * vec[i] = 1.0 / vec[i] * */ __pyx_t_1 = __pyx_v_nr; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":137 * cdef int i * for i in range(nr): * vec[i] = 1.0 / vec[i] # <<<<<<<<<<<<<< * * @staticmethod */ (__pyx_v_vec[__pyx_v_i]) = (1.0 / (__pyx_v_vec[__pyx_v_i])); } /* "linalg.pxd":134 * * @staticmethod * cdef inline void reciprocal_i(weight_t* vec, int32_t nr) nogil: # <<<<<<<<<<<<<< * cdef int i * for i in range(nr): */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; 
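/* The out-of-place Vec helpers above (add, mul, pow, div, exp) all share one
 * pattern: memcpy the source into the destination, then delegate to the
 * matching in-place *_i routine, so output must hold at least nr elements.
 * A minimal Cython usage sketch (hypothetical caller; assumes a weight_t
 * buffer and a cimport of Vec from thinc.linalg, which this file does not
 * show):
 *
 *     cdef weight_t src[4], dst[4]
 *     # ... fill src ...
 *     Vec.mul(dst, src, 0.5, 4)    # dst[i] = src[i] * 0.5
 *     Vec.exp_i(dst, 4)            # dst[i] = exp(dst[i]), in place
 */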
__Pyx_WriteUnraisable("thinc.linalg.Vec.reciprocal_i", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":140 * * @staticmethod * cdef inline weight_t mean(const weight_t* X, int32_t nr_dim) nogil: # <<<<<<<<<<<<<< * cdef weight_t mean = 0. * for x in X[:nr_dim]: */ static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_mean(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_X, int32_t __pyx_v_nr_dim) { __pyx_t_5thinc_8typedefs_weight_t __pyx_v_mean; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_x; __pyx_t_5thinc_8typedefs_weight_t __pyx_r; __Pyx_TraceDeclarations __pyx_t_5thinc_8typedefs_weight_t const *__pyx_t_1; __pyx_t_5thinc_8typedefs_weight_t const *__pyx_t_2; __pyx_t_5thinc_8typedefs_weight_t const *__pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("mean", __pyx_f[2], 140, 1, __PYX_ERR(2, 140, __pyx_L1_error)); /* "linalg.pxd":141 * @staticmethod * cdef inline weight_t mean(const weight_t* X, int32_t nr_dim) nogil: * cdef weight_t mean = 0. # <<<<<<<<<<<<<< * for x in X[:nr_dim]: * mean += x */ __pyx_v_mean = 0.; /* "linalg.pxd":142 * cdef inline weight_t mean(const weight_t* X, int32_t nr_dim) nogil: * cdef weight_t mean = 0. * for x in X[:nr_dim]: # <<<<<<<<<<<<<< * mean += x * return mean / nr_dim */ __pyx_t_2 = (__pyx_v_X + __pyx_v_nr_dim); for (__pyx_t_3 = __pyx_v_X; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_x = (__pyx_t_1[0]); /* "linalg.pxd":143 * cdef weight_t mean = 0. * for x in X[:nr_dim]: * mean += x # <<<<<<<<<<<<<< * return mean / nr_dim * */ __pyx_v_mean = (__pyx_v_mean + __pyx_v_x); } /* "linalg.pxd":144 * for x in X[:nr_dim]: * mean += x * return mean / nr_dim # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_r = (__pyx_v_mean / __pyx_v_nr_dim); goto __pyx_L0; /* "linalg.pxd":140 * * @staticmethod * cdef inline weight_t mean(const weight_t* X, int32_t nr_dim) nogil: # <<<<<<<<<<<<<< * cdef weight_t mean = 0. * for x in X[:nr_dim]: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.mean", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "linalg.pxd":147 * * @staticmethod * cdef inline weight_t variance(const weight_t* X, int32_t nr_dim) nogil: # <<<<<<<<<<<<<< * # See https://www.johndcook.com/blog/standard_deviation/ * cdef double m = X[0] */ static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_3Vec_variance(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_X, int32_t __pyx_v_nr_dim) { double __pyx_v_m; double __pyx_v_v; long __pyx_v_i; double __pyx_v_diff; __pyx_t_5thinc_8typedefs_weight_t __pyx_r; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; long __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("variance", __pyx_f[2], 147, 1, __PYX_ERR(2, 147, __pyx_L1_error)); /* "linalg.pxd":149 * cdef inline weight_t variance(const weight_t* X, int32_t nr_dim) nogil: * # See https://www.johndcook.com/blog/standard_deviation/ * cdef double m = X[0] # <<<<<<<<<<<<<< * cdef double v = 0. * for i in range(1, nr_dim): */ __pyx_v_m = (__pyx_v_X[0]); /* "linalg.pxd":150 * # See https://www.johndcook.com/blog/standard_deviation/ * cdef double m = X[0] * cdef double v = 0. # <<<<<<<<<<<<<< * for i in range(1, nr_dim): * diff = X[i]-m */ __pyx_v_v = 0.; /* "linalg.pxd":151 * cdef double m = X[0] * cdef double v = 0. 
* for i in range(1, nr_dim): # <<<<<<<<<<<<<< * diff = X[i]-m * m += diff / (i+1) */ __pyx_t_1 = __pyx_v_nr_dim; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 1; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":152 * cdef double v = 0. * for i in range(1, nr_dim): * diff = X[i]-m # <<<<<<<<<<<<<< * m += diff / (i+1) * v += diff * (X[i] - m) */ __pyx_v_diff = ((__pyx_v_X[__pyx_v_i]) - __pyx_v_m); /* "linalg.pxd":153 * for i in range(1, nr_dim): * diff = X[i]-m * m += diff / (i+1) # <<<<<<<<<<<<<< * v += diff * (X[i] - m) * return v / nr_dim */ __pyx_v_m = (__pyx_v_m + (__pyx_v_diff / (__pyx_v_i + 1))); /* "linalg.pxd":154 * diff = X[i]-m * m += diff / (i+1) * v += diff * (X[i] - m) # <<<<<<<<<<<<<< * return v / nr_dim * */ __pyx_v_v = (__pyx_v_v + (__pyx_v_diff * ((__pyx_v_X[__pyx_v_i]) - __pyx_v_m))); } /* "linalg.pxd":155 * m += diff / (i+1) * v += diff * (X[i] - m) * return v / nr_dim # <<<<<<<<<<<<<< * * */ __pyx_r = (__pyx_v_v / __pyx_v_nr_dim); goto __pyx_L0; /* "linalg.pxd":147 * * @staticmethod * cdef inline weight_t variance(const weight_t* X, int32_t nr_dim) nogil: # <<<<<<<<<<<<<< * # See https://www.johndcook.com/blog/standard_deviation/ * cdef double m = X[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Vec.variance", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "linalg.pxd":160 * cdef class VecVec: * @staticmethod * cdef inline void add(weight_t* output, # <<<<<<<<<<<<<< * const weight_t* x, * const weight_t* y, */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_add(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scale, int32_t __pyx_v_nr) { __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("add", __pyx_f[2], 160, 1, __PYX_ERR(2, 160, __pyx_L1_error)); /* "linalg.pxd":165 * weight_t scale, * int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) # <<<<<<<<<<<<<< * VecVec.add_i(output, y, scale, nr) * */ (void)(memcpy(__pyx_v_output, __pyx_v_x, ((sizeof((__pyx_v_output[0]))) * __pyx_v_nr))); /* "linalg.pxd":166 * int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) * VecVec.add_i(output, y, scale, nr) # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_f_5thinc_6linalg_6VecVec_add_i(__pyx_v_output, __pyx_v_y, __pyx_v_scale, __pyx_v_nr); /* "linalg.pxd":160 * cdef class VecVec: * @staticmethod * cdef inline void add(weight_t* output, # <<<<<<<<<<<<<< * const weight_t* x, * const weight_t* y, */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.VecVec.add", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":169 * * @staticmethod * cdef inline void add_i(weight_t* x, # <<<<<<<<<<<<<< * const weight_t* y, * weight_t scale, */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_add_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scale, int32_t __pyx_v_nr) { int __pyx_v_i; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("add_i", __pyx_f[2], 169, 1, __PYX_ERR(2, 169, 
__pyx_L1_error)); /* "linalg.pxd":177 * blis.cy.axpyv(BLIS_NO_CONJUGATE, nr, scale, y, 1, x, 1) * ELSE: * for i in range(nr): # <<<<<<<<<<<<<< * x[i] += y[i] * scale * */ __pyx_t_1 = __pyx_v_nr; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":178 * ELSE: * for i in range(nr): * x[i] += y[i] * scale # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_t_4 = __pyx_v_i; (__pyx_v_x[__pyx_t_4]) = ((__pyx_v_x[__pyx_t_4]) + ((__pyx_v_y[__pyx_v_i]) * __pyx_v_scale)); } /* "linalg.pxd":169 * * @staticmethod * cdef inline void add_i(weight_t* x, # <<<<<<<<<<<<<< * const weight_t* y, * weight_t scale, */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.VecVec.add_i", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":181 * * @staticmethod * cdef inline void batch_add_i(weight_t* x, # <<<<<<<<<<<<<< * const weight_t* y, * weight_t scale, */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_batch_add_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_scale, int32_t __pyx_v_nr, int32_t __pyx_v_nr_batch) { CYTHON_UNUSED int __pyx_v__; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("batch_add_i", __pyx_f[2], 181, 1, __PYX_ERR(2, 181, __pyx_L1_error)); /* "linalg.pxd":187 * # For fixed x, matrix of y * cdef int i, _ * for _ in range(nr_batch): # <<<<<<<<<<<<<< * VecVec.add_i(x, * y, scale, nr) */ __pyx_t_1 = __pyx_v_nr_batch; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v__ = __pyx_t_3; /* "linalg.pxd":188 * cdef int i, _ * for _ in range(nr_batch): * VecVec.add_i(x, # <<<<<<<<<<<<<< * y, scale, nr) * y += nr */ __pyx_f_5thinc_6linalg_6VecVec_add_i(__pyx_v_x, __pyx_v_y, __pyx_v_scale, __pyx_v_nr); /* "linalg.pxd":190 * VecVec.add_i(x, * y, scale, nr) * y += nr # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_v_y = (__pyx_v_y + __pyx_v_nr); } /* "linalg.pxd":181 * * @staticmethod * cdef inline void batch_add_i(weight_t* x, # <<<<<<<<<<<<<< * const weight_t* y, * weight_t scale, */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.VecVec.batch_add_i", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":193 * * @staticmethod * cdef inline void add_pow(weight_t* output, # <<<<<<<<<<<<<< * const weight_t* x, const weight_t* y, weight_t power, int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_add_pow(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_power, int32_t __pyx_v_nr) { __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("add_pow", __pyx_f[2], 193, 1, __PYX_ERR(2, 193, __pyx_L1_error)); /* "linalg.pxd":195 * cdef inline void add_pow(weight_t* output, * const weight_t* x, const weight_t* y, weight_t power, int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) # <<<<<<<<<<<<<< * VecVec.add_pow_i(output, y, power, nr) * */ (void)(memcpy(__pyx_v_output, __pyx_v_x, 
((sizeof((__pyx_v_output[0]))) * __pyx_v_nr))); /* "linalg.pxd":196 * const weight_t* x, const weight_t* y, weight_t power, int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) * VecVec.add_pow_i(output, y, power, nr) # <<<<<<<<<<<<<< * * */ __pyx_f_5thinc_6linalg_6VecVec_add_pow_i(__pyx_v_output, __pyx_v_y, __pyx_v_power, __pyx_v_nr); /* "linalg.pxd":193 * * @staticmethod * cdef inline void add_pow(weight_t* output, # <<<<<<<<<<<<<< * const weight_t* x, const weight_t* y, weight_t power, int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.VecVec.add_pow", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":200 * * @staticmethod * cdef inline void add_pow_i(weight_t* x, # <<<<<<<<<<<<<< * const weight_t* y, weight_t power, int32_t nr) nogil: * cdef int i */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_add_pow_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_power, int32_t __pyx_v_nr) { int __pyx_v_i; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("add_pow_i", __pyx_f[2], 200, 1, __PYX_ERR(2, 200, __pyx_L1_error)); /* "linalg.pxd":203 * const weight_t* y, weight_t power, int32_t nr) nogil: * cdef int i * for i in range(nr): # <<<<<<<<<<<<<< * x[i] += y[i] ** power * */ __pyx_t_1 = __pyx_v_nr; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":204 * cdef int i * for i in range(nr): * x[i] += y[i] ** power # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_t_4 = __pyx_v_i; (__pyx_v_x[__pyx_t_4]) = ((__pyx_v_x[__pyx_t_4]) + powf(((__pyx_t_5thinc_8typedefs_weight_t)(__pyx_v_y[__pyx_v_i])), __pyx_v_power)); } /* "linalg.pxd":200 * * @staticmethod * cdef inline void add_pow_i(weight_t* x, # <<<<<<<<<<<<<< * const weight_t* y, weight_t power, int32_t nr) nogil: * cdef int i */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.VecVec.add_pow_i", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":207 * * @staticmethod * cdef inline void mul(weight_t* output, # <<<<<<<<<<<<<< * const weight_t* x, const weight_t* y, int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_mul(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_output, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, int32_t __pyx_v_nr) { __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("mul", __pyx_f[2], 207, 1, __PYX_ERR(2, 207, __pyx_L1_error)); /* "linalg.pxd":209 * cdef inline void mul(weight_t* output, * const weight_t* x, const weight_t* y, int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) # <<<<<<<<<<<<<< * VecVec.mul_i(output, y, nr) * */ (void)(memcpy(__pyx_v_output, __pyx_v_x, ((sizeof((__pyx_v_output[0]))) * __pyx_v_nr))); /* "linalg.pxd":210 * const weight_t* x, const weight_t* y, int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) * VecVec.mul_i(output, y, nr) # <<<<<<<<<<<<<< * * @staticmethod */ 
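/* VecVec.mul is the same copy-then-mutate pattern at the vector/vector level:
 * output starts as a copy of x, then mul_i multiplies it elementwise by y
 * (a Hadamard product). Worked example with nr = 3:
 *     x = {1, 2, 3}, y = {4, 5, 6}  ->  output = {4, 10, 18}
 * Likewise add_pow_i above accumulates y[i] ** power into x[i], and add_pow
 * does the same into a fresh copy of x (illustrative arithmetic only; the
 * values are not taken from this module).
 */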
__pyx_f_5thinc_6linalg_6VecVec_mul_i(__pyx_v_output, __pyx_v_y, __pyx_v_nr); /* "linalg.pxd":207 * * @staticmethod * cdef inline void mul(weight_t* output, # <<<<<<<<<<<<<< * const weight_t* x, const weight_t* y, int32_t nr) nogil: * memcpy(output, x, sizeof(output[0]) * nr) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.VecVec.mul", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":213 * * @staticmethod * cdef inline void mul_i(weight_t* x, # <<<<<<<<<<<<<< * const weight_t* y, int32_t nr) nogil: * cdef int i */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_6VecVec_mul_i(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, int32_t __pyx_v_nr) { int __pyx_v_i; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("mul_i", __pyx_f[2], 213, 1, __PYX_ERR(2, 213, __pyx_L1_error)); /* "linalg.pxd":216 * const weight_t* y, int32_t nr) nogil: * cdef int i * for i in range(nr): # <<<<<<<<<<<<<< * x[i] *= y[i] * */ __pyx_t_1 = __pyx_v_nr; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":217 * cdef int i * for i in range(nr): * x[i] *= y[i] # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_t_4 = __pyx_v_i; (__pyx_v_x[__pyx_t_4]) = ((__pyx_v_x[__pyx_t_4]) * (__pyx_v_y[__pyx_v_i])); } /* "linalg.pxd":213 * * @staticmethod * cdef inline void mul_i(weight_t* x, # <<<<<<<<<<<<<< * const weight_t* y, int32_t nr) nogil: * cdef int i */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.VecVec.mul_i", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":220 * * @staticmethod * cdef inline weight_t dot( # <<<<<<<<<<<<<< * const weight_t* x, const weight_t* y, int32_t nr) nogil: * cdef int i */ static CYTHON_INLINE __pyx_t_5thinc_8typedefs_weight_t __pyx_f_5thinc_6linalg_6VecVec_dot(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_x, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_y, int32_t __pyx_v_nr) { int __pyx_v_i; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_total; __pyx_t_5thinc_8typedefs_weight_t __pyx_r; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("dot", __pyx_f[2], 220, 1, __PYX_ERR(2, 220, __pyx_L1_error)); /* "linalg.pxd":223 * const weight_t* x, const weight_t* y, int32_t nr) nogil: * cdef int i * cdef weight_t total = 0 # <<<<<<<<<<<<<< * for i in range(nr): * total += x[i] * y[i] */ __pyx_v_total = 0.0; /* "linalg.pxd":224 * cdef int i * cdef weight_t total = 0 * for i in range(nr): # <<<<<<<<<<<<<< * total += x[i] * y[i] * return total */ __pyx_t_1 = __pyx_v_nr; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":225 * cdef weight_t total = 0 * for i in range(nr): * total += x[i] * y[i] # <<<<<<<<<<<<<< * return total * */ __pyx_v_total = (__pyx_v_total + ((__pyx_v_x[__pyx_v_i]) * (__pyx_v_y[__pyx_v_i]))); } /* "linalg.pxd":226 * for i in range(nr): * total += x[i] * y[i] * return total # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_r = __pyx_v_total; goto __pyx_L0; /* "linalg.pxd":220 * * @staticmethod * cdef inline weight_t dot( # <<<<<<<<<<<<<< * 
const weight_t* x, const weight_t* y, int32_t nr) nogil: * cdef int i */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.VecVec.dot", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "linalg.pxd":229 * * @staticmethod * cdef inline int arg_max_if_true( # <<<<<<<<<<<<<< * const weight_t* scores, const int* is_valid, const int n_classes) nogil: * cdef int i */ static CYTHON_INLINE int __pyx_f_5thinc_6linalg_6VecVec_arg_max_if_true(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_scores, int const *__pyx_v_is_valid, int const __pyx_v_n_classes) { int __pyx_v_i; int __pyx_v_best; int __pyx_r; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("arg_max_if_true", __pyx_f[2], 229, 1, __PYX_ERR(2, 229, __pyx_L1_error)); /* "linalg.pxd":232 * const weight_t* scores, const int* is_valid, const int n_classes) nogil: * cdef int i * cdef int best = -1 # <<<<<<<<<<<<<< * for i in range(n_classes): * if is_valid[i] and (best == -1 or scores[i] > scores[best]): */ __pyx_v_best = -1; /* "linalg.pxd":233 * cdef int i * cdef int best = -1 * for i in range(n_classes): # <<<<<<<<<<<<<< * if is_valid[i] and (best == -1 or scores[i] > scores[best]): * best = i */ __pyx_t_1 = __pyx_v_n_classes; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":234 * cdef int best = -1 * for i in range(n_classes): * if is_valid[i] and (best == -1 or scores[i] > scores[best]): # <<<<<<<<<<<<<< * best = i * return best */ __pyx_t_5 = ((__pyx_v_is_valid[__pyx_v_i]) != 0); if (__pyx_t_5) { } else { __pyx_t_4 = __pyx_t_5; goto __pyx_L6_bool_binop_done; } __pyx_t_5 = ((__pyx_v_best == -1L) != 0); if (!__pyx_t_5) { } else { __pyx_t_4 = __pyx_t_5; goto __pyx_L6_bool_binop_done; } __pyx_t_5 = (((__pyx_v_scores[__pyx_v_i]) > (__pyx_v_scores[__pyx_v_best])) != 0); __pyx_t_4 = __pyx_t_5; __pyx_L6_bool_binop_done:; if (__pyx_t_4) { /* "linalg.pxd":235 * for i in range(n_classes): * if is_valid[i] and (best == -1 or scores[i] > scores[best]): * best = i # <<<<<<<<<<<<<< * return best * */ __pyx_v_best = __pyx_v_i; /* "linalg.pxd":234 * cdef int best = -1 * for i in range(n_classes): * if is_valid[i] and (best == -1 or scores[i] > scores[best]): # <<<<<<<<<<<<<< * best = i * return best */ } } /* "linalg.pxd":236 * if is_valid[i] and (best == -1 or scores[i] > scores[best]): * best = i * return best # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_r = __pyx_v_best; goto __pyx_L0; /* "linalg.pxd":229 * * @staticmethod * cdef inline int arg_max_if_true( # <<<<<<<<<<<<<< * const weight_t* scores, const int* is_valid, const int n_classes) nogil: * cdef int i */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.VecVec.arg_max_if_true", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "linalg.pxd":239 * * @staticmethod * cdef inline int arg_max_if_zero( # <<<<<<<<<<<<<< * const weight_t* scores, const weight_t* costs, const int n_classes) nogil: * cdef int i */ static CYTHON_INLINE int __pyx_f_5thinc_6linalg_6VecVec_arg_max_if_zero(__pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_scores, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_costs, int const __pyx_v_n_classes) { int __pyx_v_i; int __pyx_v_best; int __pyx_r; 
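/* arg_max_if_true is a masked argmax: best starts at -1 and only indices with
 * a nonzero is_valid[i] entry compete, so the function returns -1 when no
 * class is valid at all. Worked example with n_classes = 4:
 *     scores   = {0.1, 0.9, 0.7, 0.3}
 *     is_valid = {1,   0,   1,   1  }
 *     result   = 2   (0.9 is masked out, so 0.7 is the best valid score)
 * arg_max_if_zero below performs the same scan but uses costs[i] == 0 as the
 * validity test.
 */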
__Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("arg_max_if_zero", __pyx_f[2], 239, 1, __PYX_ERR(2, 239, __pyx_L1_error)); /* "linalg.pxd":242 * const weight_t* scores, const weight_t* costs, const int n_classes) nogil: * cdef int i * cdef int best = -1 # <<<<<<<<<<<<<< * for i in range(n_classes): * if costs[i] == 0 and (best == -1 or scores[i] > scores[best]): */ __pyx_v_best = -1; /* "linalg.pxd":243 * cdef int i * cdef int best = -1 * for i in range(n_classes): # <<<<<<<<<<<<<< * if costs[i] == 0 and (best == -1 or scores[i] > scores[best]): * best = i */ __pyx_t_1 = __pyx_v_n_classes; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":244 * cdef int best = -1 * for i in range(n_classes): * if costs[i] == 0 and (best == -1 or scores[i] > scores[best]): # <<<<<<<<<<<<<< * best = i * return best */ __pyx_t_5 = (((__pyx_v_costs[__pyx_v_i]) == 0.0) != 0); if (__pyx_t_5) { } else { __pyx_t_4 = __pyx_t_5; goto __pyx_L6_bool_binop_done; } __pyx_t_5 = ((__pyx_v_best == -1L) != 0); if (!__pyx_t_5) { } else { __pyx_t_4 = __pyx_t_5; goto __pyx_L6_bool_binop_done; } __pyx_t_5 = (((__pyx_v_scores[__pyx_v_i]) > (__pyx_v_scores[__pyx_v_best])) != 0); __pyx_t_4 = __pyx_t_5; __pyx_L6_bool_binop_done:; if (__pyx_t_4) { /* "linalg.pxd":245 * for i in range(n_classes): * if costs[i] == 0 and (best == -1 or scores[i] > scores[best]): * best = i # <<<<<<<<<<<<<< * return best * */ __pyx_v_best = __pyx_v_i; /* "linalg.pxd":244 * cdef int best = -1 * for i in range(n_classes): * if costs[i] == 0 and (best == -1 or scores[i] > scores[best]): # <<<<<<<<<<<<<< * best = i * return best */ } } /* "linalg.pxd":246 * if costs[i] == 0 and (best == -1 or scores[i] > scores[best]): * best = i * return best # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_best; goto __pyx_L0; /* "linalg.pxd":239 * * @staticmethod * cdef inline int arg_max_if_zero( # <<<<<<<<<<<<<< * const weight_t* scores, const weight_t* costs, const int n_classes) nogil: * cdef int i */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.VecVec.arg_max_if_zero", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "linalg.pxd":251 * cdef class Mat: * @staticmethod * cdef inline void mean_row(weight_t* Ex, # <<<<<<<<<<<<<< * const weight_t* mat, int32_t nr_row, int32_t nr_col) nogil: * memset(Ex, 0, sizeof(Ex[0]) * nr_col) */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Mat_mean_row(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_Ex, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_mat, int32_t __pyx_v_nr_row, int32_t __pyx_v_nr_col) { int32_t __pyx_v_i; __Pyx_TraceDeclarations int32_t __pyx_t_1; int32_t __pyx_t_2; int32_t __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("mean_row", __pyx_f[2], 251, 1, __PYX_ERR(2, 251, __pyx_L1_error)); /* "linalg.pxd":253 * cdef inline void mean_row(weight_t* Ex, * const weight_t* mat, int32_t nr_row, int32_t nr_col) nogil: * memset(Ex, 0, sizeof(Ex[0]) * nr_col) # <<<<<<<<<<<<<< * for i in range(nr_row): * VecVec.add_i(Ex, &mat[i * nr_col], 1.0, nr_col) */ (void)(memset(__pyx_v_Ex, 0, ((sizeof((__pyx_v_Ex[0]))) * __pyx_v_nr_col))); /* "linalg.pxd":254 * const weight_t* mat, int32_t nr_row, int32_t nr_col) nogil: * memset(Ex, 0, sizeof(Ex[0]) * nr_col) * 
for i in range(nr_row): # <<<<<<<<<<<<<< * VecVec.add_i(Ex, &mat[i * nr_col], 1.0, nr_col) * Vec.mul_i(Ex, 1.0 / nr_row, nr_col) */ __pyx_t_1 = __pyx_v_nr_row; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "linalg.pxd":255 * memset(Ex, 0, sizeof(Ex[0]) * nr_col) * for i in range(nr_row): * VecVec.add_i(Ex, &mat[i * nr_col], 1.0, nr_col) # <<<<<<<<<<<<<< * Vec.mul_i(Ex, 1.0 / nr_row, nr_col) * */ __pyx_f_5thinc_6linalg_6VecVec_add_i(__pyx_v_Ex, (&(__pyx_v_mat[(__pyx_v_i * __pyx_v_nr_col)])), 1.0, __pyx_v_nr_col); } /* "linalg.pxd":256 * for i in range(nr_row): * VecVec.add_i(Ex, &mat[i * nr_col], 1.0, nr_col) * Vec.mul_i(Ex, 1.0 / nr_row, nr_col) # <<<<<<<<<<<<<< * * @staticmethod */ __pyx_f_5thinc_6linalg_3Vec_mul_i(__pyx_v_Ex, (1.0 / __pyx_v_nr_row), __pyx_v_nr_col); /* "linalg.pxd":251 * cdef class Mat: * @staticmethod * cdef inline void mean_row(weight_t* Ex, # <<<<<<<<<<<<<< * const weight_t* mat, int32_t nr_row, int32_t nr_col) nogil: * memset(Ex, 0, sizeof(Ex[0]) * nr_col) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Mat.mean_row", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "linalg.pxd":259 * * @staticmethod * cdef inline void var_row(weight_t* Vx, # <<<<<<<<<<<<<< * const weight_t* mat, const weight_t* Ex, * int32_t nr_row, int32_t nr_col, weight_t eps) nogil: */ static CYTHON_INLINE void __pyx_f_5thinc_6linalg_3Mat_var_row(__pyx_t_5thinc_8typedefs_weight_t *__pyx_v_Vx, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_mat, __pyx_t_5thinc_8typedefs_weight_t const *__pyx_v_Ex, int32_t __pyx_v_nr_row, int32_t __pyx_v_nr_col, __pyx_t_5thinc_8typedefs_weight_t __pyx_v_eps) { __pyx_t_5thinc_8typedefs_weight_t __pyx_v_sum_; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_sum2; int32_t __pyx_v_i; int32_t __pyx_v_j; __pyx_t_5thinc_8typedefs_weight_t __pyx_v_x; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int32_t __pyx_t_3; int32_t __pyx_t_4; int32_t __pyx_t_5; int32_t __pyx_t_6; int32_t __pyx_t_7; int32_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("var_row", __pyx_f[2], 259, 1, __PYX_ERR(2, 259, __pyx_L1_error)); /* "linalg.pxd":263 * int32_t nr_row, int32_t nr_col, weight_t eps) nogil: * # From https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance * if nr_row == 0 or nr_col == 0: # <<<<<<<<<<<<<< * return * cdef weight_t sum_, sum2 */ __pyx_t_2 = ((__pyx_v_nr_row == 0) != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = ((__pyx_v_nr_col == 0) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "linalg.pxd":264 * # From https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance * if nr_row == 0 or nr_col == 0: * return # <<<<<<<<<<<<<< * cdef weight_t sum_, sum2 * for i in range(nr_col): */ goto __pyx_L0; /* "linalg.pxd":263 * int32_t nr_row, int32_t nr_col, weight_t eps) nogil: * # From https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance * if nr_row == 0 or nr_col == 0: # <<<<<<<<<<<<<< * return * cdef weight_t sum_, sum2 */ } /* "linalg.pxd":266 * return * cdef weight_t sum_, sum2 * for i in range(nr_col): # <<<<<<<<<<<<<< * sum_ = 0.0 * sum2 = 0.0 */ __pyx_t_3 = __pyx_v_nr_col; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "linalg.pxd":267 * cdef weight_t sum_, sum2 * for i in 
range(nr_col): * sum_ = 0.0 # <<<<<<<<<<<<<< * sum2 = 0.0 * for j in range(nr_row): */ __pyx_v_sum_ = 0.0; /* "linalg.pxd":268 * for i in range(nr_col): * sum_ = 0.0 * sum2 = 0.0 # <<<<<<<<<<<<<< * for j in range(nr_row): * x = mat[j * nr_col + i] */ __pyx_v_sum2 = 0.0; /* "linalg.pxd":269 * sum_ = 0.0 * sum2 = 0.0 * for j in range(nr_row): # <<<<<<<<<<<<<< * x = mat[j * nr_col + i] * sum2 += (x - Ex[i]) ** 2 */ __pyx_t_6 = __pyx_v_nr_row; __pyx_t_7 = __pyx_t_6; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_j = __pyx_t_8; /* "linalg.pxd":270 * sum2 = 0.0 * for j in range(nr_row): * x = mat[j * nr_col + i] # <<<<<<<<<<<<<< * sum2 += (x - Ex[i]) ** 2 * sum_ += x - Ex[i] */ __pyx_v_x = (__pyx_v_mat[((__pyx_v_j * __pyx_v_nr_col) + __pyx_v_i)]); /* "linalg.pxd":271 * for j in range(nr_row): * x = mat[j * nr_col + i] * sum2 += (x - Ex[i]) ** 2 # <<<<<<<<<<<<<< * sum_ += x - Ex[i] * Vx[i] = (sum2 - sum_**2 / nr_row) / nr_row */ __pyx_v_sum2 = (__pyx_v_sum2 + powf((__pyx_v_x - (__pyx_v_Ex[__pyx_v_i])), 2.0)); /* "linalg.pxd":272 * x = mat[j * nr_col + i] * sum2 += (x - Ex[i]) ** 2 * sum_ += x - Ex[i] # <<<<<<<<<<<<<< * Vx[i] = (sum2 - sum_**2 / nr_row) / nr_row * Vx[i] += eps */ __pyx_v_sum_ = (__pyx_v_sum_ + (__pyx_v_x - (__pyx_v_Ex[__pyx_v_i]))); } /* "linalg.pxd":273 * sum2 += (x - Ex[i]) ** 2 * sum_ += x - Ex[i] * Vx[i] = (sum2 - sum_**2 / nr_row) / nr_row # <<<<<<<<<<<<<< * Vx[i] += eps */ (__pyx_v_Vx[__pyx_v_i]) = ((__pyx_v_sum2 - (powf(__pyx_v_sum_, 2.0) / __pyx_v_nr_row)) / __pyx_v_nr_row); /* "linalg.pxd":274 * sum_ += x - Ex[i] * Vx[i] = (sum2 - sum_**2 / nr_row) / nr_row * Vx[i] += eps # <<<<<<<<<<<<<< */ __pyx_t_6 = __pyx_v_i; (__pyx_v_Vx[__pyx_t_6]) = ((__pyx_v_Vx[__pyx_t_6]) + __pyx_v_eps); } /* "linalg.pxd":259 * * @staticmethod * cdef inline void var_row(weight_t* Vx, # <<<<<<<<<<<<<< * const weight_t* mat, const weight_t* Ex, * int32_t nr_row, int32_t nr_col, weight_t eps) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("thinc.linalg.Mat.var_row", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] 
= PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(3, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(3, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(3, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(3, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(3, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(3, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(3, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(3, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int 
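/* array.__cinit__ implementation: validates the shape tuple and itemsize,
   normalises format to an ASCII byte string kept alive in self._format,
   allocates a single PyObject_Malloc block of sizeof(Py_ssize_t)*ndim*2 that is
   split between self._shape and self._strides, computes self.len via
   fill_contig_strides_array for the requested 'c' or 'fortran' order, and,
   when allocate_buffer is true, mallocs self.data and pre-fills object arrays
   (format == b'O') with Py_None references. */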
__pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_TraceCall("__cinit__", __pyx_f[3], 122, 0, __PYX_ERR(3, 122, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(3, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(3, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__126, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(3, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__127, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(3, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * 
format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(3, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(3, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(3, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__128, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(3, 148, __pyx_L1_error) /* 
"View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(3, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(3, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(3, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(3, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(3, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(3, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(3, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(3, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(3, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(3, 
170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(3, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__129, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(3, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(3, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(3, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(3, 180, __pyx_L1_error) } __pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< 
* mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); __Pyx_TraceCall("__getbuffer__", __pyx_f[3], 185, 0, __PYX_ERR(3, 185, __pyx_L1_error)); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(3, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = 
(__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(3, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__130, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(3, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # 
<<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__dealloc__", 0); __Pyx_TraceCall("__dealloc__", __pyx_f[3], 211, 0, __PYX_ERR(3, 211, __pyx_L1_error)); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) 
# <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.array.__dealloc__", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); __Pyx_TraceCall("__get__", __pyx_f[3], 222, 0, 
__PYX_ERR(3, 222, __pyx_L1_error)); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_memview", 0); __Pyx_TraceCall("get_memview", __pyx_f[3], 226, 0, __PYX_ERR(3, 226, __pyx_L1_error)); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); 
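/* get_memview requests a writable, format-carrying, any-contiguous buffer
   (PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE) and wraps self in
   memoryview(self, flags, self.dtype_is_object); the memview property above
   simply forwards to this helper. */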
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__len__", 0); __Pyx_TraceCall("__len__", __pyx_f[3], 230, 0, __PYX_ERR(3, 230, __pyx_L1_error)); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.array.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getattr__", 0); __Pyx_TraceCall("__getattr__", __pyx_f[3], 233, 0, __PYX_ERR(3, 233, __pyx_L1_error)); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
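/* __getattr__, __getitem__ and __setitem__ all delegate to self.memview, so
   attribute lookups and element access on a cython.array are routed through
   the memoryview wrapper rather than touching the raw data buffer directly. */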
__Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); __Pyx_TraceCall("__getitem__", __pyx_f[3], 236, 0, __PYX_ERR(3, 236, __pyx_L1_error)); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 
0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_TraceCall("__setitem__", __pyx_f[3], 239, 0, __PYX_ERR(3, 239, __pyx_L1_error)); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(3, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); __Pyx_TraceCall("__reduce_cython__", __pyx_f[3], 1, 0, __PYX_ERR(3, 1, __pyx_L1_error)); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__131, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(3, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject 
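/* __reduce_cython__ and __setstate_cython__ both raise
   TypeError("no default __reduce__ due to non-trivial __cinit__"): cython.array
   instances cannot be pickled by the default machinery because __cinit__ takes
   required arguments that it cannot reproduce. */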
*__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); __Pyx_TraceCall("__setstate_cython__", __pyx_f[3], 3, 0, __PYX_ERR(3, 3, __pyx_L1_error)); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__132, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(3, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); __Pyx_TraceCall("array_cwrapper", __pyx_f[3], 244, 0, __PYX_ERR(3, 244, __pyx_L1_error)); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if 
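/* array_cwrapper (__pyx_array_new): with buf == NULL the array constructor
   allocates its own data; otherwise the array is built with
   allocate_buffer=False and result.data is pointed at the caller-supplied
   buffer, so ownership of that memory stays with the caller. */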
(unlikely(!__pyx_t_3)) __PYX_ERR(3, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(3, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* 
"View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(3, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(3, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); __Pyx_TraceCall("__init__", __pyx_f[3], 281, 0, __PYX_ERR(3, 281, __pyx_L1_error)); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); 
__Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); __Pyx_TraceCall("__repr__", __pyx_f[3], 283, 0, __PYX_ERR(3, 283, __pyx_L1_error)); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); __Pyx_TraceCall("__reduce_cython__", __pyx_f[3], 1, 0, __PYX_ERR(3, 1, __pyx_L1_error)); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = 
(self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = 
PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = 
NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); __Pyx_TraceCall("__setstate_cython__", __pyx_f[3], 16, 0, __PYX_ERR(3, 16, __pyx_L1_error)); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(3, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("align_pointer", __pyx_f[3], 298, 1, __PYX_ERR(3, 298, __pyx_L1_error)); /* "View.MemoryView":300 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = memory */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.align_pointer", __pyx_clineno, __pyx_lineno, 
__pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(3, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(3, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(3, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(3, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(3, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int 
__pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_TraceCall("__cinit__", __pyx_f[3], 345, 0, __PYX_ERR(3, 345, __pyx_L1_error)); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(3, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* 
"View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(3, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') 
* else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__dealloc__", 0); __Pyx_TraceCall("__dealloc__", __pyx_f[3], 372, 0, __PYX_ERR(3, 372, __pyx_L1_error)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * elif (<__pyx_buffer *> &self.view).obj == Py_None: * */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ goto __pyx_L3; } /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); if 
(__pyx_t_2) { /* "View.MemoryView":377 * elif (<__pyx_buffer *> &self.view).obj == Py_None: * * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< * Py_DECREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; /* "View.MemoryView":378 * * (<__pyx_buffer *> &self.view).obj = NULL * Py_DECREF(Py_None) # <<<<<<<<<<<<<< * * cdef int i */ Py_DECREF(Py_None); /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ } __pyx_L3:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":388 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":387 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":386 * if 
__pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":389 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":391 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.memoryview.__dealloc__", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); __Pyx_TraceCall("get_item_pointer", __pyx_f[3], 393, 0, __PYX_ERR(3, 393, __pyx_L1_error)); /* "View.MemoryView":395 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":397 * cdef char *itemp = self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if 
(unlikely(!__pyx_t_4)) __PYX_ERR(3, 397, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(3, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(3, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(3, 397, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":398 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(3, 398, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(3, 398, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":397 * cdef char *itemp = self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":400 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); __Pyx_TraceCall("__getitem__", __pyx_f[3], 403, 0, __PYX_ERR(3, 403, __pyx_L1_error)); /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":405 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":407 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(3, 407, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(3, 407, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(3, 410, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":411 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":413 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return 
self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(3, 413, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":414 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_TraceCall("__setitem__", __pyx_f[3], 416, 0, __PYX_ERR(3, 416, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__133, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
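/* __pyx_tuple__133 is the pre-built argument tuple for the TypeError raised
   here; the quoted source line above shows the message text
   ("Cannot assign to read-only memoryview"). The raise below jumps to the
   error label, so __setitem__ returns -1 for read-only views. */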
__Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(3, 418, __pyx_L1_error) /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":420 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(3, 420, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(3, 420, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(3, 422, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":423 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(3, 424, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":425 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":427 * self.setitem_slice_assignment(self[index], obj) * else: * 
self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(3, 427, __pyx_L1_error) __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":429 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_TraceCall("is_slice", __pyx_f[3], 431, 0, __PYX_ERR(3, 431, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, 
&__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":435 * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(3, 435, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(3, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(3, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":436 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(3, 436, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":437 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; 
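/* `except TypeError: return None` exit path: give back and reset the
   exception state saved by __Pyx_ExceptionSave at the start of the try
   block, then jump to the function exit with Py_None already set as the
   return value. */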
__Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":439 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; __Pyx_memviewslice *__pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); __Pyx_TraceCall("setitem_slice_assignment", __pyx_f[3], 441, 0, __PYX_ERR(3, 441, __pyx_L1_error)); /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(3, 445, __pyx_L1_error) __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(3, 445, __pyx_L1_error) /* "View.MemoryView":446 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(3, 446, __pyx_L1_error) __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(3, 446, __pyx_L1_error) /* "View.MemoryView":447 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 447, __pyx_L1_error) 
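/* Fetch `src.ndim` and, just below, `dst.ndim` as Python attributes and
   convert each to a C int; together with the two unpacked slices they are
   passed to __pyx_memoryview_copy_contents() for the slice assignment. */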
__Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(3, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(3, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(3, 445, __pyx_L1_error) /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); __Pyx_TraceCall("setitem_slice_assign_scalar", __pyx_f[3], 449, 0, __PYX_ERR(3, 449, __pyx_L1_error)); /* "View.MemoryView":451 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":456 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if self.view.itemsize > sizeof(array): */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(3, 456, __pyx_L1_error) __pyx_v_dst_slice = __pyx_t_1; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > 
(sizeof(__pyx_v_array))) != 0); if (__pyx_t_2) { /* "View.MemoryView":459 * * if self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":460 * if self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":461 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(3, 461, __pyx_L1_error) /* "View.MemoryView":460 * if self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":462 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":464 * item = tmp * else: * item = array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":466 * item = array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * ( item)[0] = value */ /*try:*/ { /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * ( item)[0] = value * else: */ __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_2) { /* "View.MemoryView":468 * try: * if self.dtype_is_object: * ( item)[0] = value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object( item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * ( item)[0] = value * else: */ goto __pyx_L8; } /* "View.MemoryView":470 * ( item)[0] = value * else: * self.assign_item_from_object( item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L8:; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":475 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 475, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":476 * if self.view.suboffsets != NULL: * 
assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":479 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); __Pyx_TraceCall("setitem_indexed", __pyx_f[3], 481, 0, __PYX_ERR(3, 481, __pyx_L1_error)); /* "View.MemoryView":482 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(3, 482, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* 
"View.MemoryView":483 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); __Pyx_TraceCall("convert_item_to_object", __pyx_f[3], 485, 0, __PYX_ERR(3, 485, __pyx_L1_error)); /* "View.MemoryView":488 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":491 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":493 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = 
__Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(3, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":498 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 498, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":499 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; 
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":494 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(3, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__134, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 495, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(3, 495, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; 
PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); __Pyx_TraceCall("assign_item_from_object", __pyx_f[3], 501, 0, __PYX_ERR(3, 501, __pyx_L1_error)); /* "View.MemoryView":504 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":510 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(3, 510, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":512 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(3, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(3, 512, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(3, 514, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); 
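/*
 * Illustrative sketch (assumption: standalone example, not emitted by Cython):
 * convert_item_to_object() and assign_item_from_object() above fall back to the Python
 * struct module, using self.view.format as the format string, to round-trip the raw bytes
 * of one element to and from a Python object. The unpack direction, written directly
 * against the CPython C API, looks roughly like this (PY_SSIZE_T_CLEAN is defined at the
 * top of this file, so "y#" takes a Py_ssize_t length):
 *
 *   static PyObject *unpack_item(const char *format, const char *itemp, Py_ssize_t itemsize)
 *   {
 *       PyObject *mod = PyImport_ImportModule("struct");
 *       if (mod == NULL) return NULL;
 *       // struct.unpack(format, itemp[:itemsize]) -> tuple
 *       PyObject *result = PyObject_CallMethod(mod, "unpack", "sy#", format, itemp, itemsize);
 *       Py_DECREF(mod);
 *       return result;  // the caller takes result[0] when the format describes a single value
 *   }
 */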
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); __Pyx_TraceCall("__getbuffer__", __pyx_f[3], 518, 0, __PYX_ERR(3, 518, __pyx_L1_error)); /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__135, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(3, 520, __pyx_L1_error) /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = 
((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":525 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":528 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":530 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":533 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":535 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":538 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":540 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":542 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* "View.MemoryView":543 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":544 * info.buf = self.view.buf * info.ndim = self.view.ndim * 
info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":545 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":546 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":547 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); __Pyx_TraceCall("__get__", __pyx_f[3], 553, 0, __PYX_ERR(3, 553, __pyx_L1_error)); /* "View.MemoryView":554 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, 
__pyx_memoryviewslice_type))))) __PYX_ERR(3, 554, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":555 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(3, 555, __pyx_L1_error) /* "View.MemoryView":556 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); __Pyx_TraceCall("__get__", __pyx_f[3], 559, 0, __PYX_ERR(3, 559, __pyx_L1_error)); /* "View.MemoryView":560 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.base.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return 
__pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); __Pyx_TraceCall("__get__", __pyx_f[3], 563, 0, __PYX_ERR(3, 563, __pyx_L1_error)); /* "View.MemoryView":564 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(3, 564, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); __Pyx_TraceCall("__get__", __pyx_f[3], 567, 0, __PYX_ERR(3, 567, __pyx_L1_error)); /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* 
"View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__136, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(3, 570, __pyx_L1_error) /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":572 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(3, 572, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); __Pyx_TraceCall("__get__", __pyx_f[3], 575, 0, __PYX_ERR(3, 575, __pyx_L1_error)); /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) 
* self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__137, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":579 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(3, 579, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); __Pyx_TraceCall("__get__", __pyx_f[3], 582, 0, __PYX_ERR(3, 582, __pyx_L1_error)); /* "View.MemoryView":583 * @property * def ndim(self): * return self.view.ndim # 
<<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); __Pyx_TraceCall("__get__", __pyx_f[3], 586, 0, __PYX_ERR(3, 586, __pyx_L1_error)); /* "View.MemoryView":587 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
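/*
 * Illustrative sketch (assumption: consumer-side code, names are hypothetical): the
 * __getbuffer__ implementation above only fills in shape/strides/suboffsets/format when
 * the corresponding PyBUF_* flag is requested, and rejects PyBUF_WRITABLE for read-only
 * views. A C consumer of such a memoryview would typically do:
 *
 *   static int inspect_view(PyObject *mv)
 *   {
 *       Py_buffer view;
 *       // Request strides and format; add PyBUF_WRITABLE only when mutation is needed,
 *       // since the read-only check above raises ValueError for that combination.
 *       if (PyObject_GetBuffer(mv, &view, PyBUF_STRIDES | PyBUF_FORMAT) < 0)
 *           return -1;
 *       int ndim = view.ndim;       // view.shape and view.strides hold ndim entries
 *       PyBuffer_Release(&view);
 *       return ndim;
 *   }
 */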
__Pyx_RefNannySetupContext("__get__", 0); __Pyx_TraceCall("__get__", __pyx_f[3], 590, 0, __PYX_ERR(3, 590, __pyx_L1_error)); /* "View.MemoryView":591 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); __Pyx_TraceCall("__get__", __pyx_f[3], 594, 0, __PYX_ERR(3, 594, __pyx_L1_error)); /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":596 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":598 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":599 * * for length in 
self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":601 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":603 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__len__", 0); __Pyx_TraceCall("__len__", __pyx_f[3], 605, 0, __PYX_ERR(3, 605, __pyx_L1_error)); /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":607 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":609 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return 0 * * def __repr__(self): # 
<<<<<<<<<<<<<< * return "" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); __Pyx_TraceCall("__repr__", __pyx_f[3], 611, 0, __PYX_ERR(3, 611, __pyx_L1_error)); /* "View.MemoryView":612 * * def __repr__(self): * return "" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":613 * def __repr__(self): * return "" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":612 * * def __repr__(self): * return "" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); __Pyx_TraceCall("__str__", __pyx_f[3], 615, 0, __PYX_ERR(3, 615, __pyx_L1_error)); /* "View.MemoryView":616 * * def __str__(self): * return "" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); __Pyx_TraceCall("is_c_contig", 
__pyx_f[3], 619, 0, __PYX_ERR(3, 619, __pyx_L1_error)); /* "View.MemoryView":622 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(3, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":623 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); __Pyx_TraceCall("is_f_contig", __pyx_f[3], 625, 0, __PYX_ERR(3, 625, __pyx_L1_error)); /* "View.MemoryView":628 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(3, 628, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = 
__pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); __Pyx_TraceCall("copy", __pyx_f[3], 631, 0, __PYX_ERR(3, 631, __pyx_L1_error)); /* "View.MemoryView":633 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":636 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(3, 636, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); 
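/*
 * copy() (whose exit code this is) and copy_fortran() below differ only in
 * the requested layout: copy() clears PyBUF_F_CONTIGUOUS from self.flags and
 * passes flags | PyBUF_C_CONTIGUOUS with order "c", while copy_fortran()
 * clears PyBUF_C_CONTIGUOUS and passes flags | PyBUF_F_CONTIGUOUS with order
 * "fortran". Both route through __pyx_memoryview_copy_new_contig and wrap the
 * resulting slice via __pyx_memoryview_copy_object_from_slice. Sketch of the
 * flag arithmetic, assuming self.flags started as PyBUF_RECORDS (illustrative
 * values only, not part of the generated code):
 *
 *     int c_flags = (PyBUF_RECORDS & ~PyBUF_F_CONTIGUOUS) | PyBUF_C_CONTIGUOUS;
 *     int f_flags = (PyBUF_RECORDS & ~PyBUF_C_CONTIGUOUS) | PyBUF_F_CONTIGUOUS;
 */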
__pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); __Pyx_TraceCall("copy_fortran", __pyx_f[3], 643, 0, __PYX_ERR(3, 643, __pyx_L1_error)); /* "View.MemoryView":645 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":647 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":648 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(3, 648, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":653 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper 
*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); __Pyx_TraceCall("__reduce_cython__", __pyx_f[3], 1, 0, __PYX_ERR(3, 1, __pyx_L1_error)); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__138, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(3, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); __Pyx_TraceCall("__setstate_cython__", __pyx_f[3], 3, 0, __PYX_ERR(3, 3, __pyx_L1_error)); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default 
__reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__139, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(3, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); __Pyx_TraceCall("memoryview_cwrapper", __pyx_f[3], 657, 0, __PYX_ERR(3, 657, __pyx_L1_error)); /* "View.MemoryView":658 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":659 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":660 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = 
((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_check", 0); __Pyx_TraceCall("memoryview_check", __pyx_f[3], 663, 0, __PYX_ERR(3, 663, __pyx_L1_error)); /* "View.MemoryView":664 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.memoryview_check", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); __Pyx_TraceCall("_unellipsify", __pyx_f[3], 666, 0, __PYX_ERR(3, 666, __pyx_L1_error)); /* "View.MemoryView":671 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":672 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":671 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":674 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":676 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":677 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":678 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 679, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(3, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(3, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(3, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(3, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(3, 679, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 
= __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(3, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(3, 682, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(3, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__3); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(3, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":683 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":685 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(3, 685, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":686 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":689 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or 
isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(3, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(3, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(3, 689, __pyx_L1_error) /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":691 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":692 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(3, 692, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":694 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(3, 694, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":696 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__3); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(3, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":698 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(3, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); __Pyx_TraceCall("assert_direct_dimensions", __pyx_f[3], 700, 0, __PYX_ERR(3, 700, __pyx_L1_error)); /* "View.MemoryView":701 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = 
__pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__140, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(3, 703, __pyx_L1_error) /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); __Pyx_TraceCall("memview_slice", __pyx_f[3], 710, 0, __PYX_ERR(3, 710, __pyx_L1_error)); /* "View.MemoryView":711 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":718 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, 
(sizeof(__pyx_v_dst)))); /* "View.MemoryView":722 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(3, 722, __pyx_L1_error) } } #endif /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":725 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(3, 725, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":726 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":728 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":729 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":735 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":736 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":741 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":742 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(3, 746, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { 
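/*
 * Fast path of the `for dim, index in enumerate(indices)` loop: when
 * `indices` is a list or tuple the items are read directly with
 * PyList_GET_ITEM / PyTuple_GET_ITEM (only when CYTHON_ASSUME_SAFE_MACROS is
 * set and borrowed references are allowed, see the #if guards below),
 * otherwise the generic tp_iternext protocol is used and a StopIteration
 * terminates the loop. Each fetched index is then dispatched: an integer
 * collapses that dimension, None inserts a length-1 axis (shape 1, stride 0,
 * suboffset -1), and a slice object is unpacked into start/stop/step before
 * calling __pyx_memoryview_slice_memviewslice.
 */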
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(3, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(3, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(3, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(3, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(3, 746, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":751 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(3, 751, __pyx_L1_error) /* "View.MemoryView":748 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(3, 748, __pyx_L1_error) /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":755 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":756 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":757 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * 
new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":758 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":760 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(3, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(3, 760, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(3, 760, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":761 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(3, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(3, 761, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(3, 761, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":762 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(3, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(3, 762, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(3, 762, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":764 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(3, 764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":765 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(3, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":766 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(3, 766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":768 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(3, 768, __pyx_L1_error) /* "View.MemoryView":774 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(3, 778, __pyx_L1_error) } /* "View.MemoryView":779 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(3, 779, __pyx_L1_error) } /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(3, 777, __pyx_L1_error) __pyx_r = ((struct 
__pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":783 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(3, 782, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("slice_memviewslice", __pyx_f[3], 807, 1, __PYX_ERR(3, 807, __pyx_L1_error)); /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":830 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < 
shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":832 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(3, 832, __pyx_L1_error) /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":835 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(3, 838, __pyx_L1_error) /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* 
"View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":855 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":863 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":865 * 
stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":868 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":871 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":875 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":884 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":885 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":886 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":892 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ 
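/* [editor's note -- illustrative sketch, not generated by Cython] The extent
 * computation a few statements above ("new_shape = (stop - start) // step"
 * under cdivision, plus the remainder bump and the clamp at zero) reproduces
 * Python slice-length semantics with plain C integer arithmetic.  Assuming
 * start, stop and step have already been normalised as above:
 *
 *   static Py_ssize_t sliced_extent(Py_ssize_t start, Py_ssize_t stop,
 *                                   Py_ssize_t step)
 *   {
 *       Py_ssize_t n = (stop - start) / step;   // truncating C division
 *       if ((stop - start) - step * n)          // bump when not exact (ceil)
 *           n += 1;
 *       return n < 0 ? 0 : n;                   // clamp empty slices to 0
 *   }
 *
 * Example: shape 10, slice 7:10:2 gives n = (10 - 7) / 2 = 1 with a nonzero
 * remainder, hence n = 2, matching len(range(7, 10, 2)) in Python. */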
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = ( dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = ( dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":897 * if not is_slice: * if new_ndim == 0: * dst.data = ( dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = ( dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":899 * dst.data = ( dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":900 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(3, 899, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = ( dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":902 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":904 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename 
= NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); __Pyx_TraceCall("pybuffer_index", __pyx_f[3], 910, 0, __PYX_ERR(3, 910, __pyx_L1_error)); /* "View.MemoryView":912 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":913 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(3, 917, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(3, 917, __pyx_L1_error) } __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize); /* "View.MemoryView":918 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":920 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":921 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":926 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds 
on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":928 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(3, 928, __pyx_L1_error) /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":931 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(3, 931, __pyx_L1_error) /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":933 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = ( resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = ( resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":935 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = ( resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = ( resultp)[0] + suboffset * */ } /* "View.MemoryView":937 * resultp = ( resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r 
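/* [editor's note -- illustrative sketch, not part of the generated module]
 * The address just computed follows the PEP 3118 rule for strided and
 * possibly indirect buffers; a standalone helper doing the same thing would
 * look like
 *
 *   static char *element_ptr(char *base, Py_ssize_t index,
 *                            Py_ssize_t stride, Py_ssize_t suboffset)
 *   {
 *       char *p = base + index * stride;    // strided step within this dim
 *       if (suboffset >= 0)                 // indirect (PIL-style) dimension
 *           p = *(char **)p + suboffset;    // dereference, then offset
 *       return p;
 *   }
 *
 * where element_ptr is a hypothetical name used only for illustration. */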
= __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; __Pyx_TraceDeclarations int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("transpose_memslice", __pyx_f[3], 943, 1, __PYX_ERR(3, 943, __pyx_L1_error)); /* "View.MemoryView":944 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":946 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":947 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":951 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = (__pyx_v_ndim / 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":952 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":953 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":954 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, 
"Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":957 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(3, 957, __pyx_L1_error) /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":959 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__dealloc__", 0); __Pyx_TraceCall("__dealloc__", __pyx_f[3], 976, 0, __PYX_ERR(3, 976, __pyx_L1_error)); /* "View.MemoryView":977 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView._memoryviewslice.__dealloc__", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if 
self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); __Pyx_TraceCall("convert_item_to_object", __pyx_f[3], 979, 0, __PYX_ERR(3, 979, __pyx_L1_error)); /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":981 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":983 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); __Pyx_TraceCall("assign_item_from_object", __pyx_f[3], 985, 0, __PYX_ERR(3, 985, __pyx_L1_error)); /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":987 * cdef assign_item_from_object(self, 
char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(3, 987, __pyx_L1_error) /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":989 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); __Pyx_TraceCall("__get__", __pyx_f[3], 992, 0, __PYX_ERR(3, 992, __pyx_L1_error)); /* "View.MemoryView":993 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView._memoryviewslice.base.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python 
wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); __Pyx_TraceCall("__reduce_cython__", __pyx_f[3], 1, 0, __PYX_ERR(3, 1, __pyx_L1_error)); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__141, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(3, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); __Pyx_TraceCall("__setstate_cython__", __pyx_f[3], 3, 0, __PYX_ERR(3, 3, __pyx_L1_error)); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def 
__setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__142, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(3, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); __Pyx_TraceCall("memoryview_fromslice", __pyx_f[3], 999, 0, __PYX_ERR(3, 999, __pyx_L1_error)); /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1008 * * if memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1013 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; 
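/* [editor's note -- illustrative only] The tuple building above is the
 * generic way to call a Python type from C: pack the constructor arguments
 * (None, 0, dtype_is_object) into a 3-tuple and call the type object.  A
 * shorter, roughly equivalent formulation with the public API would be
 *
 *   PyObject *args = Py_BuildValue("(OiO)", Py_None, 0,
 *                                  dtype_is_object ? Py_True : Py_False);
 *   PyObject *res  = args ? PyObject_Call(type_obj, args, NULL) : NULL;
 *   Py_XDECREF(args);
 *
 * where type_obj stands in for __pyx_memoryviewslice_type and error handling
 * is elided; the generated code keeps the explicit PyTuple_New form. */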
/* "View.MemoryView":1015 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1016 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = ( memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1018 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = ( memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1019 * * result.from_object = ( memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1021 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1022 * * result.view = memviewslice.memview.view * result.view.buf = memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1023 * result.view = memviewslice.memview.view * result.view.buf = memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1024 * result.view.buf = memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1025 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1028 * * if (memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1030 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1032 * 
result.flags = PyBUF_RECORDS_RO * * result.view.shape = result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1033 * * result.view.shape = result.from_slice.shape * result.view.strides = result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1036 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1037 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1039 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1040 * if suboffset >= 0: * result.view.suboffsets = result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1042 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1043 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1044 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if 
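/* [editor's note -- sketch, not generated code] Because "length" is a Python
 * object in the .pyx source, the running product for result.view.len is
 * computed through PyNumber_InPlaceMultiply and converted back with
 * __Pyx_PyIndex_AsSsize_t (which reports overflow).  The plain C equivalent
 * of this loop, ignoring that overflow check, is simply
 *
 *   Py_ssize_t len = view->itemsize;
 *   for (int d = 0; d < ndim; d++)
 *       len *= view->shape[d];
 *   view->len = len;
 *
 * with "view" standing for result.view and ndim for the slice rank. */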
(unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(3, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1046 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1047 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1049 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); __Pyx_TraceCall("get_slice_from_memview", __pyx_f[3], 1052, 0, __PYX_ERR(3, 1052, __pyx_L1_error)); /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1056 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(3, 1056, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1057 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return 
&obj.from_slice */ } /* "View.MemoryView":1059 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1060 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("slice_copy", 0); __Pyx_TraceCall("slice_copy", __pyx_f[3], 1063, 0, __PYX_ERR(3, 1063, __pyx_L1_error)); /* "View.MemoryView":1067 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1068 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1069 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1071 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1072 * * dst.memview = <__pyx_memoryview *> memview * dst.data = memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1074 * dst.data = memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1075 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ 
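/* Per-dimension copy into the flat __Pyx_memviewslice (a condensed sketch of the
   three generated assignments that follow): shape and strides are taken verbatim
   from the Python-level view, and the suboffset falls back to -1 (no pointer
   indirection) when the exporting buffer supplies no suboffsets array:
       dst->shape[dim]      = shape[dim];
       dst->strides[dim]    = strides[dim];
       dst->suboffsets[dim] = suboffsets ? suboffsets[dim] : -1;
*/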
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1076 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1077 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.slice_copy", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); __Pyx_TraceCall("memoryview_copy", __pyx_f[3], 1080, 0, __PYX_ERR(3, 1080, __pyx_L1_error)); /* "View.MemoryView":1083 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1084 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
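 * (Summary of the body that follows: when memview is a _memoryviewslice, its
 * to_object_func and to_dtype_func conversion callbacks are reused; otherwise both
 * are NULL. The new object is then built with memoryview_fromslice(memviewslice[0],
 * memview.view.ndim, to_object_func, to_dtype_func, memview.dtype_is_object).)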
*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); __Pyx_TraceCall("memoryview_copy_from_slice", __pyx_f[3], 1087, 0, __PYX_ERR(3, 1087, __pyx_L1_error)); /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1095 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1096 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1098 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1099 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1101 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1103 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(3, 1101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("abs_py_ssize_t", __pyx_f[3], 1109, 1, __PYX_ERR(3, 1109, __pyx_L1_error)); /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1111 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1113 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.abs_py_ssize_t", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
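 * (Summary of the body that follows: c_stride is the stride of the innermost
 * dimension with extent greater than 1, f_stride that of the outermost such
 * dimension; the function returns 'C' when abs(c_stride) is less than or equal
 * to abs(f_stride), and 'F' otherwise.)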
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("get_best_order", __pyx_f[3], 1116, 1, __PYX_ERR(3, 1116, __pyx_L1_error)); /* "View.MemoryView":1121 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1122 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1124 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1129 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1132 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1135 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1137 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* 
"View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.get_best_order", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1147 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1149 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1150 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * src_stride == itemsize == dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * src_stride == itemsize == dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1154 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * src_stride == itemsize == dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * src_stride == 
itemsize == dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1155 * if (src_stride > 0 and dst_stride > 0 and * src_stride == itemsize == dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * src_stride == itemsize == dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1157 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1159 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1160 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * src_stride == itemsize == dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1162 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1163 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1167 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1168 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * 
int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("copy_strided_to_strided", __pyx_f[3], 1170, 1, __PYX_ERR(3, 1170, __pyx_L1_error)); /* "View.MemoryView":1173 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.copy_strided_to_strided", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; __Pyx_TraceDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("slice_get_size", __pyx_f[3], 1177, 1, __PYX_ERR(3, 1177, __pyx_L1_error)); /* "View.MemoryView":1179 * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for shape in src.shape[:ndim]: */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1181 * cdef Py_ssize_t shape, size = src.memview.view.itemsize * * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< * size *= shape * */ __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_shape = (__pyx_t_2[0]); /* "View.MemoryView":1182 * * for shape in src.shape[:ndim]: * size *= shape # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * __pyx_v_shape); } /* "View.MemoryView":1184 * size *= shape * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.slice_get_size", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "View.MemoryView":1187 * * 
@cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("fill_contig_strides_array", __pyx_f[3], 1187, 1, __PYX_ERR(3, 1187, __pyx_L1_error)); /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1197 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1198 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1199 * for idx in range(ndim): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1201 * stride *= shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1202 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1203 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1205 * stride *= shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.fill_contig_strides_array", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_r = 0; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; __Pyx_TraceDeclarations Py_ssize_t __pyx_t_1; int 
__pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("copy_data_to_temp", __pyx_f[3], 1208, 1, __PYX_ERR(3, 1208, __pyx_L1_error)); /* "View.MemoryView":1219 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1220 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1222 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1224 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(3, 1224, __pyx_L1_error) /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1227 * * * tmpslice.data = result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1228 * * tmpslice.data = result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1229 * tmpslice.data = result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1230 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1231 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1233 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1237 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1242 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1244 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1246 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); __Pyx_TraceCall("_err_extents", __pyx_f[3], 1251, 0, __PYX_ERR(3, 1251, __pyx_L1_error)); /* "View.MemoryView":1254 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 1254, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1253 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(3, 1253, __pyx_L1_error) /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_TraceCall("_err_dim", __pyx_f[3], 1257, 0, __PYX_ERR(3, 1257, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1258 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && 
unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(3, 1258, __pyx_L1_error) /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_TraceCall("_err", __pyx_f[3], 1261, 0, __PYX_ERR(3, 1261, __pyx_L1_error)); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1263 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(3, 1263, __pyx_L1_error) /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1265 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(3, 1265, __pyx_L1_error) } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; __Pyx_TraceDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("memoryview_copy_contents", __pyx_f[3], 1268, 1, __PYX_ERR(3, 1268, __pyx_L1_error)); /* "View.MemoryView":1276 * Check for overlapping memory and verify the shapes. 
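 * (Overview of the body that follows: leading length-1 dimensions are added with
 * broadcast_leading when src and dst differ in ndim; source dimensions of extent 1
 * are broadcast by zeroing their strides, any other extent mismatch raises through
 * _err_extents, and indirect dimensions raise through _err_dim. If the slices
 * overlap in memory, src is first copied into a contiguous temporary buffer via
 * copy_data_to_temp. When no broadcasting occurred and both slices are contiguous
 * in the same order, a single memcpy suffices; otherwise both slices may be
 * transposed so the copy runs in C order and copy_strided_to_strided performs an
 * element-wise copy, with refcount_copying adjusting object reference counts around
 * the raw copy when dtype_is_object. The temporary buffer is freed before
 * returning 0.)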
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1277 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1279 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1280 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1281 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1285 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1287 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1289 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1291 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1294 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1295 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1297 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(3, 1297, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(3, 1300, __pyx_L1_error) /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1305 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1307 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) 
__PYX_ERR(3, 1307, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1308 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1314 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1316 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1320 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1321 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1323 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, 
dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1324 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1329 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(3, 1329, __pyx_L1_error) /* "View.MemoryView":1330 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(3, 1330, __pyx_L1_error) /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1332 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1333 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1334 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1336 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1337 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); return __pyx_r; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void 
__pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("broadcast_leading", __pyx_f[3], 1340, 1, __PYX_ERR(3, 1340, __pyx_L1_error)); /* "View.MemoryView":1344 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1346 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1347 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1348 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1349 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1351 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1352 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1353 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1354 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.broadcast_leading", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int 
__pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_TraceDeclarations int __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("refcount_copying", __pyx_f[3], 1362, 1, __PYX_ERR(3, 1362, __pyx_L1_error)); /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1367 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.refcount_copying", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); __Pyx_TraceCall("refcount_objects_in_slice_with_gil", __pyx_f[3], 1371, 0, __PYX_ERR(3, 1371, __pyx_L1_error)); /* "View.MemoryView":1374 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.refcount_objects_in_slice_with_gil", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; 
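/* Recursively walks the slice along its first dimension: when ndim == 1 each
   element is treated as a PyObject* and Py_INCREF'd or Py_DECREF'd depending
   on 'inc'; otherwise the call recurses with shape+1/strides+1 and ndim-1,
   advancing 'data' by strides[0] after each step. */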
__Pyx_TraceDeclarations __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); __Pyx_TraceCall("refcount_objects_in_slice", __pyx_f[3], 1377, 0, __PYX_ERR(3, 1377, __pyx_L1_error)); /* "View.MemoryView":1381 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF(( data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF(( data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1384 * if ndim == 1: * if inc: * Py_INCREF(( data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF(( data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF(( data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1386 * Py_INCREF(( data)[0]) * else: * Py_DECREF(( data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF(( data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1388 * Py_DECREF(( data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1389 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1391 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.refcount_objects_in_slice", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_L0:; __Pyx_TraceReturn(Py_None, 0); __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { __Pyx_TraceDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("slice_assign_scalar", __pyx_f[3], 1397, 1, __PYX_ERR(3, 1397, __pyx_L1_error)); /* "View.MemoryView":1400 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # 
<<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1401 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1403 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView.slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; __Pyx_TraceDeclarations int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceCall("_slice_assign_scalar", __pyx_f[3], 1407, 1, __PYX_ERR(3, 1407, __pyx_L1_error)); /* "View.MemoryView":1411 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1412 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1415 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1416 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1417 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * 
memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1419 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1420 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1422 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("View.MemoryView._slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1); __pyx_L0:; __Pyx_TraceReturn(Py_None, 1); } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(3, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(3, 1, 
__pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(3, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(3, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(3, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_TraceFrameInit(__pyx_codeobj__143) __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); __Pyx_TraceCall("__pyx_unpickle_Enum", __pyx_f[3], 1, 0, __PYX_ERR(3, 1, __pyx_L1_error)); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 
0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(3, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(3, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(3, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(3, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_TraceDeclarations __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = 
NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); __Pyx_TraceCall("__pyx_unpickle_Enum__set_state", __pyx_f[3], 11, 0, __PYX_ERR(3, 11, __pyx_L1_error)); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(3, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(3, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(3, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(3, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(3, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(3, 14, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(3, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_TraceReturn(__pyx_r, 0); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *__pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences[8]; static int __pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences = 0; static PyObject *__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { PyObject *o; if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences)))) { o = (PyObject*)__pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences[--__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences]; memset(o, 0, sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences)); (void) PyObject_INIT(o, t); PyObject_GC_Track(o); } else { o = (*t->tp_alloc)(t, 0); if (unlikely(!o)) return 0; } return o; } static void __pyx_tp_dealloc_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences(PyObject *o) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *)o; PyObject_GC_UnTrack(o); Py_CLEAR(p->__pyx_v_inplace); Py_CLEAR(p->__pyx_v_masks); if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences)))) { __pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences[__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences++] = ((struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *)o); } else { (*Py_TYPE(o)->tp_free)(o); } } static int __pyx_tp_traverse_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *p = (struct 
__pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *)o; if (p->__pyx_v_inplace) { e = (*v)(p->__pyx_v_inplace, a); if (e) return e; } if (p->__pyx_v_masks) { e = (*v)(p->__pyx_v_masks, a); if (e) return e; } return 0; } static int __pyx_tp_clear_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences(PyObject *o) { PyObject* tmp; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *)o; tmp = ((PyObject*)p->__pyx_v_inplace); p->__pyx_v_inplace = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->__pyx_v_masks); p->__pyx_v_masks = ((PyObject*)Py_None); Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyTypeObject __pyx_type_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences = { PyVarObject_HEAD_INIT(0, 0) "thinc.neural.ops.__pyx_scope_struct__dropout_sequences", /*tp_name*/ sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences, /*tp_traverse*/ __pyx_tp_clear_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *__pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop[8]; static int __pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop = 0; static PyObject *__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { PyObject *o; if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop)))) { o = (PyObject*)__pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop[--__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop]; memset(o, 0, sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop)); (void) PyObject_INIT(o, t); PyObject_GC_Track(o); 
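/* Freelist hit: the scope struct was recycled and re-initialised in place;
   the else branch below falls back to the type's tp_alloc slot. */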
} else { o = (*t->tp_alloc)(t, 0); if (unlikely(!o)) return 0; } return o; } static void __pyx_tp_dealloc_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop(PyObject *o) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *)o; PyObject_GC_UnTrack(o); Py_CLEAR(p->__pyx_outer_scope); Py_CLEAR(p->__pyx_v_backprop); if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop)))) { __pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop[__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop++] = ((struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *)o); } else { (*Py_TYPE(o)->tp_free)(o); } } static int __pyx_tp_traverse_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *)o; if (p->__pyx_outer_scope) { e = (*v)(((PyObject *)p->__pyx_outer_scope), a); if (e) return e; } if (p->__pyx_v_backprop) { e = (*v)(p->__pyx_v_backprop, a); if (e) return e; } return 0; } static int __pyx_tp_clear_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop(PyObject *o) { PyObject* tmp; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop *)o; tmp = ((PyObject*)p->__pyx_outer_scope); p->__pyx_outer_scope = ((struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences *)Py_None); Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->__pyx_v_backprop); p->__pyx_v_backprop = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyTypeObject __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop = { PyVarObject_HEAD_INIT(0, 0) "thinc.neural.ops.__pyx_scope_struct_1_wrap_backprop", /*tp_name*/ sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop, /*tp_traverse*/ __pyx_tp_clear_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, 
/*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *__pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct_2_dropout[8]; static int __pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_2_dropout = 0; static PyObject *__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_2_dropout(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { PyObject *o; if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_2_dropout > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout)))) { o = (PyObject*)__pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct_2_dropout[--__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_2_dropout]; memset(o, 0, sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout)); (void) PyObject_INIT(o, t); PyObject_GC_Track(o); } else { o = (*t->tp_alloc)(t, 0); if (unlikely(!o)) return 0; } return o; } static void __pyx_tp_dealloc_5thinc_6neural_3ops___pyx_scope_struct_2_dropout(PyObject *o) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *)o; PyObject_GC_UnTrack(o); Py_CLEAR(p->__pyx_v_mask); if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_2_dropout < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout)))) { __pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct_2_dropout[__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_2_dropout++] = ((struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *)o); } else { (*Py_TYPE(o)->tp_free)(o); } } static int __pyx_tp_traverse_5thinc_6neural_3ops___pyx_scope_struct_2_dropout(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *)o; if (p->__pyx_v_mask) { e = (*v)(p->__pyx_v_mask, a); if (e) return e; } return 0; } static int __pyx_tp_clear_5thinc_6neural_3ops___pyx_scope_struct_2_dropout(PyObject *o) { PyObject* tmp; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *)o; tmp = ((PyObject*)p->__pyx_v_mask); p->__pyx_v_mask = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyTypeObject __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_2_dropout = { PyVarObject_HEAD_INIT(0, 0) "thinc.neural.ops.__pyx_scope_struct_2_dropout", /*tp_name*/ sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_5thinc_6neural_3ops___pyx_scope_struct_2_dropout, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ 
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_5thinc_6neural_3ops___pyx_scope_struct_2_dropout, /*tp_traverse*/ __pyx_tp_clear_5thinc_6neural_3ops___pyx_scope_struct_2_dropout, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_2_dropout, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *__pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop[8]; static int __pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop = 0; static PyObject *__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { PyObject *o; if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop)))) { o = (PyObject*)__pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop[--__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop]; memset(o, 0, sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop)); (void) PyObject_INIT(o, t); PyObject_GC_Track(o); } else { o = (*t->tp_alloc)(t, 0); if (unlikely(!o)) return 0; } return o; } static void __pyx_tp_dealloc_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop(PyObject *o) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *)o; PyObject_GC_UnTrack(o); Py_CLEAR(p->__pyx_outer_scope); Py_CLEAR(p->__pyx_v_backprop); if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop)))) { __pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop[__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop++] = ((struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *)o); } else { (*Py_TYPE(o)->tp_free)(o); } } static int __pyx_tp_traverse_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *)o; if (p->__pyx_outer_scope) { e = (*v)(((PyObject *)p->__pyx_outer_scope), a); if (e) return e; } if (p->__pyx_v_backprop) { e = (*v)(p->__pyx_v_backprop, a); if (e) return e; } return 0; } static int __pyx_tp_clear_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop(PyObject *o) { PyObject* tmp; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *p = (struct 
__pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop *)o; tmp = ((PyObject*)p->__pyx_outer_scope); p->__pyx_outer_scope = ((struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_2_dropout *)Py_None); Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->__pyx_v_backprop); p->__pyx_v_backprop = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyTypeObject __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop = { PyVarObject_HEAD_INIT(0, 0) "thinc.neural.ops.__pyx_scope_struct_3_wrap_backprop", /*tp_name*/ sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop, /*tp_traverse*/ __pyx_tp_clear_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *__pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences[8]; static int __pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences = 0; static PyObject *__pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { PyObject *o; if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences)))) { o = (PyObject*)__pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences[--__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences]; memset(o, 0, sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences)); (void) PyObject_INIT(o, t); PyObject_GC_Track(o); } else { o = (*t->tp_alloc)(t, 0); if (unlikely(!o)) return 0; } return o; } static void __pyx_tp_dealloc_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences(PyObject *o) { struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *)o; PyObject_GC_UnTrack(o); 
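/* Release the variables captured by the square_sequences closure
   (extra_dims, indices, lengths, self) before the scope struct is put back
   on its freelist or handed to tp_free. */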
Py_CLEAR(p->__pyx_v_extra_dims); Py_CLEAR(p->__pyx_v_indices); Py_CLEAR(p->__pyx_v_lengths); Py_CLEAR(p->__pyx_v_self); if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences)))) { __pyx_freelist_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences[__pyx_freecount_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences++] = ((struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *)o); } else { (*Py_TYPE(o)->tp_free)(o); } } static int __pyx_tp_traverse_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences(PyObject *o, visitproc v, void *a) { int e; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *)o; if (p->__pyx_v_extra_dims) { e = (*v)(p->__pyx_v_extra_dims, a); if (e) return e; } if (p->__pyx_v_indices) { e = (*v)(p->__pyx_v_indices, a); if (e) return e; } if (p->__pyx_v_lengths) { e = (*v)(p->__pyx_v_lengths, a); if (e) return e; } if (p->__pyx_v_self) { e = (*v)(p->__pyx_v_self, a); if (e) return e; } return 0; } static int __pyx_tp_clear_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences(PyObject *o) { PyObject* tmp; struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *p = (struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences *)o; tmp = ((PyObject*)p->__pyx_v_extra_dims); p->__pyx_v_extra_dims = ((PyObject*)Py_None); Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->__pyx_v_indices); p->__pyx_v_indices = ((PyObject*)Py_None); Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->__pyx_v_lengths); p->__pyx_v_lengths = ((PyObject*)Py_None); Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->__pyx_v_self); p->__pyx_v_self = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyTypeObject __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences = { PyVarObject_HEAD_INIT(0, 0) "thinc.neural.ops.__pyx_scope_struct_4_square_sequences", /*tp_name*/ sizeof(struct __pyx_obj_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences, /*tp_traverse*/ __pyx_tp_clear_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, 
/*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, 
/*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "thinc.neural.ops.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "thinc.neural.ops.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ #if PY_VERSION_HEX 
< 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); 
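/* Release the previously held _array_interface reference and drop the owner of
   the exported buffer (view.obj) so tp_clear can break reference cycles. */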
Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, 
/*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "thinc.neural.ops.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static 
int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "thinc.neural.ops._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_ops(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_ops}, {0, NULL} }; 
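/* With PEP 489 multi-phase initialisation, module creation and execution are
   exposed through the Py_mod_create / Py_mod_exec slots above rather than being
   performed eagerly inside the module init function. */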
#endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "ops", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_n_s_B, __pyx_k_B, sizeof(__pyx_k_B), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_n_s_C, __pyx_k_C, sizeof(__pyx_k_C), 0, 0, 1, 1}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_CupyOps, __pyx_k_CupyOps, sizeof(__pyx_k_CupyOps), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_adam, __pyx_k_CupyOps_adam, sizeof(__pyx_k_CupyOps_adam), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_asarray, __pyx_k_CupyOps_asarray, sizeof(__pyx_k_CupyOps_asarray), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_backprop_max_pool, __pyx_k_CupyOps_backprop_max_pool, sizeof(__pyx_k_CupyOps_backprop_max_pool), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_backprop_maxout, __pyx_k_CupyOps_backprop_maxout, sizeof(__pyx_k_CupyOps_backprop_maxout), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_backprop_mean_pool, __pyx_k_CupyOps_backprop_mean_pool, sizeof(__pyx_k_CupyOps_backprop_mean_pool), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_backprop_mish, __pyx_k_CupyOps_backprop_mish, sizeof(__pyx_k_CupyOps_backprop_mish), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_backprop_relu, __pyx_k_CupyOps_backprop_relu, sizeof(__pyx_k_CupyOps_backprop_relu), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_backprop_selu, __pyx_k_CupyOps_backprop_selu, sizeof(__pyx_k_CupyOps_backprop_selu), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_backprop_seq2col, __pyx_k_CupyOps_backprop_seq2col, sizeof(__pyx_k_CupyOps_backprop_seq2col), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_backprop_sum_pool, __pyx_k_CupyOps_backprop_sum_pool, sizeof(__pyx_k_CupyOps_backprop_sum_pool), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_clip_gradient, __pyx_k_CupyOps_clip_gradient, sizeof(__pyx_k_CupyOps_clip_gradient), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_gemm, __pyx_k_CupyOps_gemm, sizeof(__pyx_k_CupyOps_gemm), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_hash, __pyx_k_CupyOps_hash, sizeof(__pyx_k_CupyOps_hash), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_matmul, __pyx_k_CupyOps_matmul, sizeof(__pyx_k_CupyOps_matmul), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_max_pool, __pyx_k_CupyOps_max_pool, sizeof(__pyx_k_CupyOps_max_pool), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_maxout, __pyx_k_CupyOps_maxout, sizeof(__pyx_k_CupyOps_maxout), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_mean_pool, __pyx_k_CupyOps_mean_pool, 
sizeof(__pyx_k_CupyOps_mean_pool), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_mish, __pyx_k_CupyOps_mish, sizeof(__pyx_k_CupyOps_mish), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_normal_init, __pyx_k_CupyOps_normal_init, sizeof(__pyx_k_CupyOps_normal_init), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_position_encode, __pyx_k_CupyOps_position_encode, sizeof(__pyx_k_CupyOps_position_encode), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_relu, __pyx_k_CupyOps_relu, sizeof(__pyx_k_CupyOps_relu), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_scatter_add, __pyx_k_CupyOps_scatter_add, sizeof(__pyx_k_CupyOps_scatter_add), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_selu, __pyx_k_CupyOps_selu, sizeof(__pyx_k_CupyOps_selu), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_seq2col, __pyx_k_CupyOps_seq2col, sizeof(__pyx_k_CupyOps_seq2col), 0, 0, 1, 1}, {&__pyx_n_s_CupyOps_sum_pool, __pyx_k_CupyOps_sum_pool, sizeof(__pyx_k_CupyOps_sum_pool), 0, 0, 1, 1}, {&__pyx_n_s_D, __pyx_k_D, sizeof(__pyx_k_D), 0, 0, 1, 1}, {&__pyx_n_s_ElementwiseKernel, __pyx_k_ElementwiseKernel, sizeof(__pyx_k_ElementwiseKernel), 0, 0, 1, 1}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_n_s_I, __pyx_k_I, sizeof(__pyx_k_I), 0, 0, 1, 1}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_n_s_MemoryPointer, __pyx_k_MemoryPointer, sizeof(__pyx_k_MemoryPointer), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_NotImplementedError, __pyx_k_NotImplementedError, sizeof(__pyx_k_NotImplementedError), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps, __pyx_k_NumpyOps, sizeof(__pyx_k_NumpyOps), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_adam, __pyx_k_NumpyOps_adam, sizeof(__pyx_k_NumpyOps_adam), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_add_sum, __pyx_k_NumpyOps_add_sum, sizeof(__pyx_k_NumpyOps_add_sum), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_affine, __pyx_k_NumpyOps_affine, sizeof(__pyx_k_NumpyOps_affine), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_allocate, 
__pyx_k_NumpyOps_allocate, sizeof(__pyx_k_NumpyOps_allocate), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_asarray, __pyx_k_NumpyOps_asarray, sizeof(__pyx_k_NumpyOps_asarray), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_backprop_elu, __pyx_k_NumpyOps_backprop_elu, sizeof(__pyx_k_NumpyOps_backprop_elu), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_backprop_max_pool, __pyx_k_NumpyOps_backprop_max_pool, sizeof(__pyx_k_NumpyOps_backprop_max_pool), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_backprop_maxout, __pyx_k_NumpyOps_backprop_maxout, sizeof(__pyx_k_NumpyOps_backprop_maxout), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_backprop_mean_pool, __pyx_k_NumpyOps_backprop_mean_pool, sizeof(__pyx_k_NumpyOps_backprop_mean_pool), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_backprop_mish, __pyx_k_NumpyOps_backprop_mish, sizeof(__pyx_k_NumpyOps_backprop_mish), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_backprop_relu, __pyx_k_NumpyOps_backprop_relu, sizeof(__pyx_k_NumpyOps_backprop_relu), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_backprop_selu, __pyx_k_NumpyOps_backprop_selu, sizeof(__pyx_k_NumpyOps_backprop_selu), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_backprop_seq2col, __pyx_k_NumpyOps_backprop_seq2col, sizeof(__pyx_k_NumpyOps_backprop_seq2col), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_backprop_sum_pool, __pyx_k_NumpyOps_backprop_sum_pool, sizeof(__pyx_k_NumpyOps_backprop_sum_pool), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_elu, __pyx_k_NumpyOps_elu, sizeof(__pyx_k_NumpyOps_elu), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_gemm, __pyx_k_NumpyOps_gemm, sizeof(__pyx_k_NumpyOps_gemm), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_hash, __pyx_k_NumpyOps_hash, sizeof(__pyx_k_NumpyOps_hash), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_increment_slices, __pyx_k_NumpyOps_increment_slices, sizeof(__pyx_k_NumpyOps_increment_slices), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_inplace_add, __pyx_k_NumpyOps_inplace_add, sizeof(__pyx_k_NumpyOps_inplace_add), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_matmul, __pyx_k_NumpyOps_matmul, sizeof(__pyx_k_NumpyOps_matmul), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_max_pool, __pyx_k_NumpyOps_max_pool, sizeof(__pyx_k_NumpyOps_max_pool), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_maxout, __pyx_k_NumpyOps_maxout, sizeof(__pyx_k_NumpyOps_maxout), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_mean_pool, __pyx_k_NumpyOps_mean_pool, sizeof(__pyx_k_NumpyOps_mean_pool), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_mish, __pyx_k_NumpyOps_mish, sizeof(__pyx_k_NumpyOps_mish), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_ngrams, __pyx_k_NumpyOps_ngrams, sizeof(__pyx_k_NumpyOps_ngrams), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_position_encode, __pyx_k_NumpyOps_position_encode, sizeof(__pyx_k_NumpyOps_position_encode), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_relu, __pyx_k_NumpyOps_relu, sizeof(__pyx_k_NumpyOps_relu), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_remap_ids, __pyx_k_NumpyOps_remap_ids, sizeof(__pyx_k_NumpyOps_remap_ids), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_scatter_add, __pyx_k_NumpyOps_scatter_add, sizeof(__pyx_k_NumpyOps_scatter_add), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_selu, __pyx_k_NumpyOps_selu, sizeof(__pyx_k_NumpyOps_selu), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_seq2col, __pyx_k_NumpyOps_seq2col, sizeof(__pyx_k_NumpyOps_seq2col), 0, 0, 1, 1}, {&__pyx_n_s_NumpyOps_sum_pool, __pyx_k_NumpyOps_sum_pool, sizeof(__pyx_k_NumpyOps_sum_pool), 0, 0, 1, 1}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_n_s_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 1, 1}, {&__pyx_n_s_Ops, __pyx_k_Ops, sizeof(__pyx_k_Ops), 0, 0, 1, 1}, {&__pyx_n_s_Ops___init, __pyx_k_Ops___init, sizeof(__pyx_k_Ops___init), 0, 0, 1, 1}, {&__pyx_n_s_Ops_adam, __pyx_k_Ops_adam, sizeof(__pyx_k_Ops_adam), 0, 0, 1, 1}, 
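/* Each string-table entry pairs a module-level PyObject* slot with its C
   literal, the literal's length, and flags selecting bytes vs str vs unicode
   and whether the object is interned; most entries are the Ops / NumpyOps /
   CupyOps method and argument names referenced from ops.pyx. */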
{&__pyx_n_s_Ops_add_batch_outer, __pyx_k_Ops_add_batch_outer, sizeof(__pyx_k_Ops_add_batch_outer), 0, 0, 1, 1}, {&__pyx_n_s_Ops_add_sum, __pyx_k_Ops_add_sum, sizeof(__pyx_k_Ops_add_sum), 0, 0, 1, 1}, {&__pyx_n_s_Ops_affine, __pyx_k_Ops_affine, sizeof(__pyx_k_Ops_affine), 0, 0, 1, 1}, {&__pyx_n_s_Ops_allocate, __pyx_k_Ops_allocate, sizeof(__pyx_k_Ops_allocate), 0, 0, 1, 1}, {&__pyx_n_s_Ops_argmax, __pyx_k_Ops_argmax, sizeof(__pyx_k_Ops_argmax), 0, 0, 1, 1}, {&__pyx_n_s_Ops_asarray, __pyx_k_Ops_asarray, sizeof(__pyx_k_Ops_asarray), 0, 0, 1, 1}, {&__pyx_n_s_Ops_backprop_lstm, __pyx_k_Ops_backprop_lstm, sizeof(__pyx_k_Ops_backprop_lstm), 0, 0, 1, 1}, {&__pyx_n_s_Ops_backprop_mish, __pyx_k_Ops_backprop_mish, sizeof(__pyx_k_Ops_backprop_mish), 0, 0, 1, 1}, {&__pyx_n_s_Ops_backprop_seq2col, __pyx_k_Ops_backprop_seq2col, sizeof(__pyx_k_Ops_backprop_seq2col), 0, 0, 1, 1}, {&__pyx_n_s_Ops_backprop_softmax, __pyx_k_Ops_backprop_softmax, sizeof(__pyx_k_Ops_backprop_softmax), 0, 0, 1, 1}, {&__pyx_n_s_Ops_backprop_softmax_sequences, __pyx_k_Ops_backprop_softmax_sequences, sizeof(__pyx_k_Ops_backprop_softmax_sequences), 0, 0, 1, 1}, {&__pyx_n_s_Ops_backprop_softplus, __pyx_k_Ops_backprop_softplus, sizeof(__pyx_k_Ops_backprop_softplus), 0, 0, 1, 1}, {&__pyx_n_s_Ops_backprop_take, __pyx_k_Ops_backprop_take, sizeof(__pyx_k_Ops_backprop_take), 0, 0, 1, 1}, {&__pyx_n_s_Ops_batch_dot, __pyx_k_Ops_batch_dot, sizeof(__pyx_k_Ops_batch_dot), 0, 0, 1, 1}, {&__pyx_n_s_Ops_clip_gradient, __pyx_k_Ops_clip_gradient, sizeof(__pyx_k_Ops_clip_gradient), 0, 0, 1, 1}, {&__pyx_n_s_Ops_clip_low, __pyx_k_Ops_clip_low, sizeof(__pyx_k_Ops_clip_low), 0, 0, 1, 1}, {&__pyx_n_s_Ops_dot, __pyx_k_Ops_dot, sizeof(__pyx_k_Ops_dot), 0, 0, 1, 1}, {&__pyx_n_s_Ops_dropout, __pyx_k_Ops_dropout, sizeof(__pyx_k_Ops_dropout), 0, 0, 1, 1}, {&__pyx_n_s_Ops_dropout_locals_lambda, __pyx_k_Ops_dropout_locals_lambda, sizeof(__pyx_k_Ops_dropout_locals_lambda), 0, 0, 1, 1}, {&__pyx_n_s_Ops_dropout_locals_wrap_backprop, __pyx_k_Ops_dropout_locals_wrap_backprop, sizeof(__pyx_k_Ops_dropout_locals_wrap_backprop), 0, 0, 1, 1}, {&__pyx_n_s_Ops_dropout_locals_wrap_backprop_2, __pyx_k_Ops_dropout_locals_wrap_backprop_2, sizeof(__pyx_k_Ops_dropout_locals_wrap_backprop_2), 0, 0, 1, 1}, {&__pyx_n_s_Ops_dropout_sequences, __pyx_k_Ops_dropout_sequences, sizeof(__pyx_k_Ops_dropout_sequences), 0, 0, 1, 1}, {&__pyx_n_s_Ops_dropout_sequences_locals_lam, __pyx_k_Ops_dropout_sequences_locals_lam, sizeof(__pyx_k_Ops_dropout_sequences_locals_lam), 0, 0, 1, 1}, {&__pyx_n_s_Ops_dropout_sequences_locals_wra, __pyx_k_Ops_dropout_sequences_locals_wra, sizeof(__pyx_k_Ops_dropout_sequences_locals_wra), 0, 0, 1, 1}, {&__pyx_n_s_Ops_dropout_sequences_locals_wra_2, __pyx_k_Ops_dropout_sequences_locals_wra_2, sizeof(__pyx_k_Ops_dropout_sequences_locals_wra_2), 0, 0, 1, 1}, {&__pyx_n_s_Ops_dsigmoid, __pyx_k_Ops_dsigmoid, sizeof(__pyx_k_Ops_dsigmoid), 0, 0, 1, 1}, {&__pyx_n_s_Ops_dtanh, __pyx_k_Ops_dtanh, sizeof(__pyx_k_Ops_dtanh), 0, 0, 1, 1}, {&__pyx_n_s_Ops_expand_dims, __pyx_k_Ops_expand_dims, sizeof(__pyx_k_Ops_expand_dims), 0, 0, 1, 1}, {&__pyx_n_s_Ops_flatten, __pyx_k_Ops_flatten, sizeof(__pyx_k_Ops_flatten), 0, 0, 1, 1}, {&__pyx_n_s_Ops_get_dropout_mask, __pyx_k_Ops_get_dropout_mask, sizeof(__pyx_k_Ops_get_dropout_mask), 0, 0, 1, 1}, {&__pyx_n_s_Ops_he_normal_init, __pyx_k_Ops_he_normal_init, sizeof(__pyx_k_Ops_he_normal_init), 0, 0, 1, 1}, {&__pyx_n_s_Ops_logloss, __pyx_k_Ops_logloss, sizeof(__pyx_k_Ops_logloss), 0, 0, 1, 1}, {&__pyx_n_s_Ops_lstm, __pyx_k_Ops_lstm, 
sizeof(__pyx_k_Ops_lstm), 0, 0, 1, 1}, {&__pyx_n_s_Ops_mish, __pyx_k_Ops_mish, sizeof(__pyx_k_Ops_mish), 0, 0, 1, 1}, {&__pyx_n_s_Ops_norm, __pyx_k_Ops_norm, sizeof(__pyx_k_Ops_norm), 0, 0, 1, 1}, {&__pyx_n_s_Ops_normal_init, __pyx_k_Ops_normal_init, sizeof(__pyx_k_Ops_normal_init), 0, 0, 1, 1}, {&__pyx_n_s_Ops_seq2col, __pyx_k_Ops_seq2col, sizeof(__pyx_k_Ops_seq2col), 0, 0, 1, 1}, {&__pyx_n_s_Ops_sigmoid, __pyx_k_Ops_sigmoid, sizeof(__pyx_k_Ops_sigmoid), 0, 0, 1, 1}, {&__pyx_n_s_Ops_softmax, __pyx_k_Ops_softmax, sizeof(__pyx_k_Ops_softmax), 0, 0, 1, 1}, {&__pyx_n_s_Ops_softmax_sequences, __pyx_k_Ops_softmax_sequences, sizeof(__pyx_k_Ops_softmax_sequences), 0, 0, 1, 1}, {&__pyx_n_s_Ops_softplus, __pyx_k_Ops_softplus, sizeof(__pyx_k_Ops_softplus), 0, 0, 1, 1}, {&__pyx_n_s_Ops_square_sequences, __pyx_k_Ops_square_sequences, sizeof(__pyx_k_Ops_square_sequences), 0, 0, 1, 1}, {&__pyx_n_s_Ops_square_sequences_locals_unpa, __pyx_k_Ops_square_sequences_locals_unpa, sizeof(__pyx_k_Ops_square_sequences_locals_unpa), 0, 0, 1, 1}, {&__pyx_n_s_Ops_take_which, __pyx_k_Ops_take_which, sizeof(__pyx_k_Ops_take_which), 0, 0, 1, 1}, {&__pyx_n_s_Ops_unflatten, __pyx_k_Ops_unflatten, sizeof(__pyx_k_Ops_unflatten), 0, 0, 1, 1}, {&__pyx_n_s_Ops_unzip, __pyx_k_Ops_unzip, sizeof(__pyx_k_Ops_unzip), 0, 0, 1, 1}, {&__pyx_n_s_Ops_update_averages, __pyx_k_Ops_update_averages, sizeof(__pyx_k_Ops_update_averages), 0, 0, 1, 1}, {&__pyx_n_s_Ops_xavier_uniform_init, __pyx_k_Ops_xavier_uniform_init, sizeof(__pyx_k_Ops_xavier_uniform_init), 0, 0, 1, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_P, __pyx_k_P, sizeof(__pyx_k_P), 0, 0, 1, 1}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_Sized, __pyx_k_Sized, sizeof(__pyx_k_Sized), 0, 0, 1, 1}, {&__pyx_kp_s_Softmax_currently_only_supports, __pyx_k_Softmax_currently_only_supports, sizeof(__pyx_k_Softmax_currently_only_supports), 0, 0, 1, 0}, {&__pyx_n_s_T, __pyx_k_T, sizeof(__pyx_k_T), 0, 0, 1, 1}, {&__pyx_kp_s_T_grad_T_lr_T_one_minus_beta1_T, __pyx_k_T_grad_T_lr_T_one_minus_beta1_T, sizeof(__pyx_k_T_grad_T_lr_T_one_minus_beta1_T), 0, 0, 1, 0}, {&__pyx_kp_s_T_param_T_m_T_v, __pyx_k_T_param_T_m_T_v, sizeof(__pyx_k_T_param_T_m_T_v), 0, 0, 1, 0}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_W, __pyx_k_W, sizeof(__pyx_k_W), 0, 0, 1, 1}, {&__pyx_n_s_X, __pyx_k_X, sizeof(__pyx_k_X), 0, 0, 1, 1}, {&__pyx_n_s_Xs, __pyx_k_Xs, sizeof(__pyx_k_Xs), 0, 0, 1, 1}, {&__pyx_n_s_Xsoft, __pyx_k_Xsoft, sizeof(__pyx_k_Xsoft), 0, 0, 1, 1}, {&__pyx_n_s_Xsub, __pyx_k_Xsub, sizeof(__pyx_k_Xsub), 0, 0, 1, 1}, {&__pyx_n_s_Y, __pyx_k_Y, sizeof(__pyx_k_Y), 0, 0, 1, 1}, {&__pyx_n_s_Y_ptr, __pyx_k_Y_ptr, sizeof(__pyx_k_Y_ptr), 0, 0, 1, 1}, {&__pyx_n_s__144, __pyx_k__144, sizeof(__pyx_k__144), 0, 0, 1, 1}, {&__pyx_n_s_a, __pyx_k_a, sizeof(__pyx_k_a), 0, 0, 1, 1}, {&__pyx_n_s_act_pieces, __pyx_k_act_pieces, sizeof(__pyx_k_act_pieces), 0, 0, 1, 1}, {&__pyx_n_s_adam, 
__pyx_k_adam, sizeof(__pyx_k_adam), 0, 0, 1, 1}, {&__pyx_n_s_add, __pyx_k_add, sizeof(__pyx_k_add), 0, 0, 1, 1}, {&__pyx_n_s_add_batch_outer, __pyx_k_add_batch_outer, sizeof(__pyx_k_add_batch_outer), 0, 0, 1, 1}, {&__pyx_n_s_add_gradient_noise, __pyx_k_add_gradient_noise, sizeof(__pyx_k_add_gradient_noise), 0, 0, 1, 1}, {&__pyx_n_s_add_sum, __pyx_k_add_sum, sizeof(__pyx_k_add_sum), 0, 0, 1, 1}, {&__pyx_n_s_affine, __pyx_k_affine, sizeof(__pyx_k_affine), 0, 0, 1, 1}, {&__pyx_n_s_aligned_alloc, __pyx_k_aligned_alloc, sizeof(__pyx_k_aligned_alloc), 0, 0, 1, 1}, {&__pyx_n_s_allocate, __pyx_k_allocate, sizeof(__pyx_k_allocate), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_alpha, __pyx_k_alpha, sizeof(__pyx_k_alpha), 0, 0, 1, 1}, {&__pyx_n_s_argmax, __pyx_k_argmax, sizeof(__pyx_k_argmax), 0, 0, 1, 1}, {&__pyx_n_s_args, __pyx_k_args, sizeof(__pyx_k_args), 0, 0, 1, 1}, {&__pyx_n_s_arr, __pyx_k_arr, sizeof(__pyx_k_arr), 0, 0, 1, 1}, {&__pyx_n_s_arr_i, __pyx_k_arr_i, sizeof(__pyx_k_arr_i), 0, 0, 1, 1}, {&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1}, {&__pyx_n_s_asarray, __pyx_k_asarray, sizeof(__pyx_k_asarray), 0, 0, 1, 1}, {&__pyx_n_s_ascontiguousarray, __pyx_k_ascontiguousarray, sizeof(__pyx_k_ascontiguousarray), 0, 0, 1, 1}, {&__pyx_n_s_at, __pyx_k_at, sizeof(__pyx_k_at), 0, 0, 1, 1}, {&__pyx_n_s_axes, __pyx_k_axes, sizeof(__pyx_k_axes), 0, 0, 1, 1}, {&__pyx_n_s_axis, __pyx_k_axis, sizeof(__pyx_k_axis), 0, 0, 1, 1}, {&__pyx_n_s_backprop, __pyx_k_backprop, sizeof(__pyx_k_backprop), 0, 0, 1, 1}, {&__pyx_n_s_backprop_elu, __pyx_k_backprop_elu, sizeof(__pyx_k_backprop_elu), 0, 0, 1, 1}, {&__pyx_n_s_backprop_lstm, __pyx_k_backprop_lstm, sizeof(__pyx_k_backprop_lstm), 0, 0, 1, 1}, {&__pyx_n_s_backprop_max_pool, __pyx_k_backprop_max_pool, sizeof(__pyx_k_backprop_max_pool), 0, 0, 1, 1}, {&__pyx_n_s_backprop_maxout, __pyx_k_backprop_maxout, sizeof(__pyx_k_backprop_maxout), 0, 0, 1, 1}, {&__pyx_n_s_backprop_mean_pool, __pyx_k_backprop_mean_pool, sizeof(__pyx_k_backprop_mean_pool), 0, 0, 1, 1}, {&__pyx_n_s_backprop_mish, __pyx_k_backprop_mish, sizeof(__pyx_k_backprop_mish), 0, 0, 1, 1}, {&__pyx_n_s_backprop_relu, __pyx_k_backprop_relu, sizeof(__pyx_k_backprop_relu), 0, 0, 1, 1}, {&__pyx_n_s_backprop_selu, __pyx_k_backprop_selu, sizeof(__pyx_k_backprop_selu), 0, 0, 1, 1}, {&__pyx_n_s_backprop_seq2col, __pyx_k_backprop_seq2col, sizeof(__pyx_k_backprop_seq2col), 0, 0, 1, 1}, {&__pyx_n_s_backprop_softmax, __pyx_k_backprop_softmax, sizeof(__pyx_k_backprop_softmax), 0, 0, 1, 1}, {&__pyx_n_s_backprop_softmax_sequences, __pyx_k_backprop_softmax_sequences, sizeof(__pyx_k_backprop_softmax_sequences), 0, 0, 1, 1}, {&__pyx_n_s_backprop_softplus, __pyx_k_backprop_softplus, sizeof(__pyx_k_backprop_softplus), 0, 0, 1, 1}, {&__pyx_n_s_backprop_sum_pool, __pyx_k_backprop_sum_pool, sizeof(__pyx_k_backprop_sum_pool), 0, 0, 1, 1}, {&__pyx_n_s_backprop_take, __pyx_k_backprop_take, sizeof(__pyx_k_backprop_take), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_batch_dot, __pyx_k_batch_dot, sizeof(__pyx_k_batch_dot), 0, 0, 1, 1}, {&__pyx_n_s_batch_size_at_t, __pyx_k_batch_size_at_t, sizeof(__pyx_k_batch_size_at_t), 0, 0, 1, 1}, {&__pyx_n_s_best, __pyx_k_best, sizeof(__pyx_k_best), 0, 0, 1, 1}, {&__pyx_n_s_beta1, __pyx_k_beta1, sizeof(__pyx_k_beta1), 0, 0, 1, 1}, {&__pyx_n_s_beta2, __pyx_k_beta2, sizeof(__pyx_k_beta2), 0, 0, 1, 1}, {&__pyx_n_s_bias, __pyx_k_bias, sizeof(__pyx_k_bias), 0, 
0, 1, 1}, {&__pyx_n_s_blis, __pyx_k_blis, sizeof(__pyx_k_blis), 0, 0, 1, 1}, {&__pyx_n_s_blis_py, __pyx_k_blis_py, sizeof(__pyx_k_blis_py), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_c_contiguous, __pyx_k_c_contiguous, sizeof(__pyx_k_c_contiguous), 0, 0, 1, 1}, {&__pyx_n_s_cells, __pyx_k_cells, sizeof(__pyx_k_cells), 0, 0, 1, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_clip, __pyx_k_clip, sizeof(__pyx_k_clip), 0, 0, 1, 1}, {&__pyx_n_s_clip_gradient, __pyx_k_clip_gradient, sizeof(__pyx_k_clip_gradient), 0, 0, 1, 1}, {&__pyx_n_s_clip_low, __pyx_k_clip_low, sizeof(__pyx_k_clip_low), 0, 0, 1, 1}, {&__pyx_n_s_coinflips, __pyx_k_coinflips, sizeof(__pyx_k_coinflips), 0, 0, 1, 1}, {&__pyx_n_s_collections, __pyx_k_collections, sizeof(__pyx_k_collections), 0, 0, 1, 1}, {&__pyx_n_s_collections_abc, __pyx_k_collections_abc, sizeof(__pyx_k_collections_abc), 0, 0, 1, 1}, {&__pyx_n_s_cols, __pyx_k_cols, sizeof(__pyx_k_cols), 0, 0, 1, 1}, {&__pyx_n_s_compat, __pyx_k_compat, sizeof(__pyx_k_compat), 0, 0, 1, 1}, {&__pyx_n_s_compile_with_cache, __pyx_k_compile_with_cache, sizeof(__pyx_k_compile_with_cache), 0, 0, 1, 1}, {&__pyx_n_s_concatenate, __pyx_k_concatenate, sizeof(__pyx_k_concatenate), 0, 0, 1, 1}, {&__pyx_n_s_contig_array, __pyx_k_contig_array, sizeof(__pyx_k_contig_array), 0, 0, 1, 1}, {&__pyx_n_s_contig_starts, __pyx_k_contig_starts, sizeof(__pyx_k_contig_starts), 0, 0, 1, 1}, {&__pyx_n_s_contig_to_add, __pyx_k_contig_to_add, sizeof(__pyx_k_contig_to_add), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_copy, __pyx_k_copy, sizeof(__pyx_k_copy), 0, 0, 1, 1}, {&__pyx_n_s_copy_array, __pyx_k_copy_array, sizeof(__pyx_k_copy_array), 0, 0, 1, 1}, {&__pyx_n_s_cpu, __pyx_k_cpu, sizeof(__pyx_k_cpu), 0, 0, 1, 1}, {&__pyx_n_s_cpu_clip_gradient, __pyx_k_cpu_clip_gradient, sizeof(__pyx_k_cpu_clip_gradient), 0, 0, 1, 1}, {&__pyx_n_s_ct, __pyx_k_ct, sizeof(__pyx_k_ct), 0, 0, 1, 1}, {&__pyx_n_s_cuda, __pyx_k_cuda, sizeof(__pyx_k_cuda), 0, 0, 1, 1}, {&__pyx_n_s_cupy, __pyx_k_cupy, sizeof(__pyx_k_cupy), 0, 0, 1, 1}, {&__pyx_n_s_cupy_cuda, __pyx_k_cupy_cuda, sizeof(__pyx_k_cupy_cuda), 0, 0, 1, 1}, {&__pyx_n_s_cupy_cuda_compiler, __pyx_k_cupy_cuda_compiler, sizeof(__pyx_k_cupy_cuda_compiler), 0, 0, 1, 1}, {&__pyx_n_s_custom_kernels, __pyx_k_custom_kernels, sizeof(__pyx_k_custom_kernels), 0, 0, 1, 1}, {&__pyx_n_s_dX, __pyx_k_dX, sizeof(__pyx_k_dX), 0, 0, 1, 1}, {&__pyx_n_s_dX__bo, __pyx_k_dX__bo, sizeof(__pyx_k_dX__bo), 0, 0, 1, 1}, {&__pyx_n_s_dX__bop, __pyx_k_dX__bop, sizeof(__pyx_k_dX__bop), 0, 0, 1, 1}, {&__pyx_n_s_dX_ptr, __pyx_k_dX_ptr, sizeof(__pyx_k_dX_ptr), 0, 0, 1, 1}, {&__pyx_n_s_dXsub, __pyx_k_dXsub, sizeof(__pyx_k_dXsub), 0, 0, 1, 1}, {&__pyx_n_s_dY, __pyx_k_dY, sizeof(__pyx_k_dY), 0, 0, 1, 1}, {&__pyx_n_s_dYsub, __pyx_k_dYsub, sizeof(__pyx_k_dYsub), 0, 0, 1, 1}, {&__pyx_n_s_d_cells, __pyx_k_d_cells, sizeof(__pyx_k_d_cells), 0, 0, 1, 1}, {&__pyx_n_s_d_gate_pieces, __pyx_k_d_gate_pieces, sizeof(__pyx_k_d_gate_pieces), 0, 0, 1, 1}, {&__pyx_n_s_d_maxes, __pyx_k_d_maxes, sizeof(__pyx_k_d_maxes), 0, 0, 1, 1}, {&__pyx_n_s_d_means, __pyx_k_d_means, 
sizeof(__pyx_k_d_means), 0, 0, 1, 1}, {&__pyx_n_s_d_output, __pyx_k_d_output, sizeof(__pyx_k_d_output), 0, 0, 1, 1}, {&__pyx_n_s_d_prev, __pyx_k_d_prev, sizeof(__pyx_k_d_prev), 0, 0, 1, 1}, {&__pyx_n_s_d_sums, __pyx_k_d_sums, sizeof(__pyx_k_d_sums), 0, 0, 1, 1}, {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1}, {&__pyx_n_s_data_ptr, __pyx_k_data_ptr, sizeof(__pyx_k_data_ptr), 0, 0, 1, 1}, {&__pyx_n_s_dc, __pyx_k_dc, sizeof(__pyx_k_dc), 0, 0, 1, 1}, {&__pyx_n_s_decay, __pyx_k_decay, sizeof(__pyx_k_decay), 0, 0, 1, 1}, {&__pyx_n_s_delta, __pyx_k_delta, sizeof(__pyx_k_delta), 0, 0, 1, 1}, {&__pyx_n_s_delta_2, __pyx_k_delta_2, sizeof(__pyx_k_delta_2), 0, 0, 1, 1}, {&__pyx_n_s_dest, __pyx_k_dest, sizeof(__pyx_k_dest), 0, 0, 1, 1}, {&__pyx_n_s_device, __pyx_k_device, sizeof(__pyx_k_device), 0, 0, 1, 1}, {&__pyx_n_s_dhc, __pyx_k_dhc, sizeof(__pyx_k_dhc), 0, 0, 1, 1}, {&__pyx_n_s_dhf, __pyx_k_dhf, sizeof(__pyx_k_dhf), 0, 0, 1, 1}, {&__pyx_n_s_dhi, __pyx_k_dhi, sizeof(__pyx_k_dhi), 0, 0, 1, 1}, {&__pyx_n_s_dho, __pyx_k_dho, sizeof(__pyx_k_dho), 0, 0, 1, 1}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_doc, __pyx_k_doc, sizeof(__pyx_k_doc), 0, 0, 1, 1}, {&__pyx_n_s_dot, __pyx_k_dot, sizeof(__pyx_k_dot), 0, 0, 1, 1}, {&__pyx_n_s_dotted, __pyx_k_dotted, sizeof(__pyx_k_dotted), 0, 0, 1, 1}, {&__pyx_n_s_drop, __pyx_k_drop, sizeof(__pyx_k_drop), 0, 0, 1, 1}, {&__pyx_n_s_dropout, __pyx_k_dropout, sizeof(__pyx_k_dropout), 0, 0, 1, 1}, {&__pyx_n_s_dropout_sequences, __pyx_k_dropout_sequences, sizeof(__pyx_k_dropout_sequences), 0, 0, 1, 1}, {&__pyx_n_s_dsigmoid, __pyx_k_dsigmoid, sizeof(__pyx_k_dsigmoid), 0, 0, 1, 1}, {&__pyx_n_s_dtanh, __pyx_k_dtanh, sizeof(__pyx_k_dtanh), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_dx, __pyx_k_dx, sizeof(__pyx_k_dx), 0, 0, 1, 1}, {&__pyx_n_s_dy, __pyx_k_dy, sizeof(__pyx_k_dy), 0, 0, 1, 1}, {&__pyx_n_s_elu, __pyx_k_elu, sizeof(__pyx_k_elu), 0, 0, 1, 1}, {&__pyx_n_s_ema, __pyx_k_ema, sizeof(__pyx_k_ema), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_entropy, __pyx_k_entropy, sizeof(__pyx_k_entropy), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_eps, __pyx_k_eps, sizeof(__pyx_k_eps), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_exp, __pyx_k_exp, sizeof(__pyx_k_exp), 0, 0, 1, 1}, {&__pyx_n_s_expand_dims, __pyx_k_expand_dims, sizeof(__pyx_k_expand_dims), 0, 0, 1, 1}, {&__pyx_n_s_extra_dims, __pyx_k_extra_dims, sizeof(__pyx_k_extra_dims), 0, 0, 1, 1}, {&__pyx_n_s_f, __pyx_k_f, sizeof(__pyx_k_f), 0, 0, 1, 1}, {&__pyx_n_s_fan_in, __pyx_k_fan_in, sizeof(__pyx_k_fan_in), 0, 0, 1, 1}, {&__pyx_n_s_fill, __pyx_k_fill, sizeof(__pyx_k_fill), 0, 0, 1, 1}, {&__pyx_n_s_finish_update, __pyx_k_finish_update, sizeof(__pyx_k_finish_update), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_flatten, __pyx_k_flatten, sizeof(__pyx_k_flatten), 0, 0, 1, 1}, {&__pyx_n_s_float32, __pyx_k_float32, sizeof(__pyx_k_float32), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_gate_pieces, __pyx_k_gate_pieces, 
sizeof(__pyx_k_gate_pieces), 0, 0, 1, 1}, {&__pyx_n_s_gemm, __pyx_k_gemm, sizeof(__pyx_k_gemm), 0, 0, 1, 1}, {&__pyx_n_s_get, __pyx_k_get, sizeof(__pyx_k_get), 0, 0, 1, 1}, {&__pyx_n_s_get_array_module, __pyx_k_get_array_module, sizeof(__pyx_k_get_array_module), 0, 0, 1, 1}, {&__pyx_n_s_get_dropout_mask, __pyx_k_get_dropout_mask, sizeof(__pyx_k_get_dropout_mask), 0, 0, 1, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_gpu, __pyx_k_gpu, sizeof(__pyx_k_gpu), 0, 0, 1, 1}, {&__pyx_n_s_grad_norm, __pyx_k_grad_norm, sizeof(__pyx_k_grad_norm), 0, 0, 1, 1}, {&__pyx_n_s_gradient, __pyx_k_gradient, sizeof(__pyx_k_gradient), 0, 0, 1, 1}, {&__pyx_n_s_hash, __pyx_k_hash, sizeof(__pyx_k_hash), 0, 0, 1, 1}, {&__pyx_n_s_hc, __pyx_k_hc, sizeof(__pyx_k_hc), 0, 0, 1, 1}, {&__pyx_n_s_he_normal_init, __pyx_k_he_normal_init, sizeof(__pyx_k_he_normal_init), 0, 0, 1, 1}, {&__pyx_n_s_hf, __pyx_k_hf, sizeof(__pyx_k_hf), 0, 0, 1, 1}, {&__pyx_n_s_hi, __pyx_k_hi, sizeof(__pyx_k_hi), 0, 0, 1, 1}, {&__pyx_n_s_ho, __pyx_k_ho, sizeof(__pyx_k_ho), 0, 0, 1, 1}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_ids, __pyx_k_ids, sizeof(__pyx_k_ids), 0, 0, 1, 1}, {&__pyx_n_s_ids_mv, __pyx_k_ids_mv, sizeof(__pyx_k_ids_mv), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_in_size, __pyx_k_in_size, sizeof(__pyx_k_in_size), 0, 0, 1, 1}, {&__pyx_n_s_increment_slices, __pyx_k_increment_slices, sizeof(__pyx_k_increment_slices), 0, 0, 1, 1}, {&__pyx_n_s_indices, __pyx_k_indices, sizeof(__pyx_k_indices), 0, 0, 1, 1}, {&__pyx_n_s_init, __pyx_k_init, sizeof(__pyx_k_init), 0, 0, 1, 1}, {&__pyx_n_s_inits, __pyx_k_inits, sizeof(__pyx_k_inits), 0, 0, 1, 1}, {&__pyx_n_s_inplace, __pyx_k_inplace, sizeof(__pyx_k_inplace), 0, 0, 1, 1}, {&__pyx_n_s_inplace_add, __pyx_k_inplace_add, sizeof(__pyx_k_inplace_add), 0, 0, 1, 1}, {&__pyx_n_s_inputs, __pyx_k_inputs, sizeof(__pyx_k_inputs), 0, 0, 1, 1}, {&__pyx_n_s_int32, __pyx_k_int32, sizeof(__pyx_k_int32), 0, 0, 1, 1}, {&__pyx_n_s_integer_types, __pyx_k_integer_types, sizeof(__pyx_k_integer_types), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_keepdims, __pyx_k_keepdims, sizeof(__pyx_k_keepdims), 0, 0, 1, 1}, {&__pyx_n_s_key, __pyx_k_key, sizeof(__pyx_k_key), 0, 0, 1, 1}, {&__pyx_n_s_keys, __pyx_k_keys, sizeof(__pyx_k_keys), 0, 0, 1, 1}, {&__pyx_n_s_keys_2, __pyx_k_keys_2, sizeof(__pyx_k_keys_2), 0, 0, 1, 1}, {&__pyx_n_s_kwargs, __pyx_k_kwargs, sizeof(__pyx_k_kwargs), 0, 0, 1, 1}, {&__pyx_n_s_learn_rate, __pyx_k_learn_rate, sizeof(__pyx_k_learn_rate), 0, 0, 1, 1}, {&__pyx_n_s_length, __pyx_k_length, sizeof(__pyx_k_length), 0, 0, 1, 1}, {&__pyx_n_s_lengths, __pyx_k_lengths, sizeof(__pyx_k_lengths), 0, 0, 1, 1}, {&__pyx_n_s_lengths_indices, __pyx_k_lengths_indices, sizeof(__pyx_k_lengths_indices), 0, 0, 1, 1}, {&__pyx_n_s_linalg, __pyx_k_linalg, sizeof(__pyx_k_linalg), 0, 0, 1, 1}, {&__pyx_n_s_loc, __pyx_k_loc, sizeof(__pyx_k_loc), 0, 0, 1, 1}, {&__pyx_n_s_log, __pyx_k_log, sizeof(__pyx_k_log), 0, 0, 1, 1}, {&__pyx_n_s_log1p, 
__pyx_k_log1p, sizeof(__pyx_k_log1p), 0, 0, 1, 1}, {&__pyx_n_s_log1p_exp, __pyx_k_log1p_exp, sizeof(__pyx_k_log1p_exp), 0, 0, 1, 1}, {&__pyx_n_s_log_yp, __pyx_k_log_yp, sizeof(__pyx_k_log_yp), 0, 0, 1, 1}, {&__pyx_n_s_logloss, __pyx_k_logloss, sizeof(__pyx_k_logloss), 0, 0, 1, 1}, {&__pyx_n_s_loss, __pyx_k_loss, sizeof(__pyx_k_loss), 0, 0, 1, 1}, {&__pyx_n_s_lstm, __pyx_k_lstm, sizeof(__pyx_k_lstm), 0, 0, 1, 1}, {&__pyx_n_s_m, __pyx_k_m, sizeof(__pyx_k_m), 0, 0, 1, 1}, {&__pyx_kp_s_m_one_minus_beta1_grad_m_v_one_m, __pyx_k_m_one_minus_beta1_grad_m_v_one_m, sizeof(__pyx_k_m_one_minus_beta1_grad_m_v_one_m), 0, 0, 1, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mapped, __pyx_k_mapped, sizeof(__pyx_k_mapped), 0, 0, 1, 1}, {&__pyx_n_s_mapping, __pyx_k_mapping, sizeof(__pyx_k_mapping), 0, 0, 1, 1}, {&__pyx_n_s_mask, __pyx_k_mask, sizeof(__pyx_k_mask), 0, 0, 1, 1}, {&__pyx_n_s_masked, __pyx_k_masked, sizeof(__pyx_k_masked), 0, 0, 1, 1}, {&__pyx_n_s_masks, __pyx_k_masks, sizeof(__pyx_k_masks), 0, 0, 1, 1}, {&__pyx_n_s_matmul, __pyx_k_matmul, sizeof(__pyx_k_matmul), 0, 0, 1, 1}, {&__pyx_n_s_max, __pyx_k_max, sizeof(__pyx_k_max), 0, 0, 1, 1}, {&__pyx_n_s_max_decay, __pyx_k_max_decay, sizeof(__pyx_k_max_decay), 0, 0, 1, 1}, {&__pyx_n_s_max_pool, __pyx_k_max_pool, sizeof(__pyx_k_max_pool), 0, 0, 1, 1}, {&__pyx_n_s_maxes, __pyx_k_maxes, sizeof(__pyx_k_maxes), 0, 0, 1, 1}, {&__pyx_n_s_maximum, __pyx_k_maximum, sizeof(__pyx_k_maximum), 0, 0, 1, 1}, {&__pyx_n_s_maxout, __pyx_k_maxout, sizeof(__pyx_k_maxout), 0, 0, 1, 1}, {&__pyx_n_s_mean_pool, __pyx_k_mean_pool, sizeof(__pyx_k_mean_pool), 0, 0, 1, 1}, {&__pyx_n_s_means, __pyx_k_means, sizeof(__pyx_k_means), 0, 0, 1, 1}, {&__pyx_n_s_mem, __pyx_k_mem, sizeof(__pyx_k_mem), 0, 0, 1, 1}, {&__pyx_n_s_memptr, __pyx_k_memptr, sizeof(__pyx_k_memptr), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_metaclass, __pyx_k_metaclass, sizeof(__pyx_k_metaclass), 0, 0, 1, 1}, {&__pyx_n_s_mish, __pyx_k_mish, sizeof(__pyx_k_mish), 0, 0, 1, 1}, {&__pyx_n_s_mod_rate, __pyx_k_mod_rate, sizeof(__pyx_k_mod_rate), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_module, __pyx_k_module, sizeof(__pyx_k_module), 0, 0, 1, 1}, {&__pyx_n_s_mom1, __pyx_k_mom1, sizeof(__pyx_k_mom1), 0, 0, 1, 1}, {&__pyx_n_s_mom2, __pyx_k_mom2, sizeof(__pyx_k_mom2), 0, 0, 1, 1}, {&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1}, {&__pyx_n_s_nB, __pyx_k_nB, sizeof(__pyx_k_nB), 0, 0, 1, 1}, {&__pyx_n_s_nF, __pyx_k_nF, sizeof(__pyx_k_nF), 0, 0, 1, 1}, {&__pyx_n_s_nP, __pyx_k_nP, sizeof(__pyx_k_nP), 0, 0, 1, 1}, {&__pyx_n_s_nS, __pyx_k_nS, sizeof(__pyx_k_nS), 0, 0, 1, 1}, {&__pyx_n_s_nW, __pyx_k_nW, sizeof(__pyx_k_nW), 0, 0, 1, 1}, {&__pyx_n_s_n_items, __pyx_k_n_items, sizeof(__pyx_k_n_items), 0, 0, 1, 1}, {&__pyx_n_s_n_slice, __pyx_k_n_slice, sizeof(__pyx_k_n_slice), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndarray, __pyx_k_ndarray, sizeof(__pyx_k_ndarray), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, 
sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_n_s_new_x, __pyx_k_new_x, sizeof(__pyx_k_new_x), 0, 0, 1, 1}, {&__pyx_n_s_ngrams, __pyx_k_ngrams, sizeof(__pyx_k_ngrams), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_noise_level, __pyx_k_noise_level, sizeof(__pyx_k_noise_level), 0, 0, 1, 1}, {&__pyx_n_s_norm, __pyx_k_norm, sizeof(__pyx_k_norm), 0, 0, 1, 1}, {&__pyx_n_s_normal, __pyx_k_normal, sizeof(__pyx_k_normal), 0, 0, 1, 1}, {&__pyx_n_s_normal_init, __pyx_k_normal_init, sizeof(__pyx_k_normal_init), 0, 0, 1, 1}, {&__pyx_n_s_nr_weight, __pyx_k_nr_weight, sizeof(__pyx_k_nr_weight), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_object, __pyx_k_object, sizeof(__pyx_k_object), 0, 0, 1, 1}, {&__pyx_n_s_omega, __pyx_k_omega, sizeof(__pyx_k_omega), 0, 0, 1, 1}, {&__pyx_kp_s_ops_pyx, __pyx_k_ops_pyx, sizeof(__pyx_k_ops_pyx), 0, 0, 1, 0}, {&__pyx_n_s_order, __pyx_k_order, sizeof(__pyx_k_order), 0, 0, 1, 1}, {&__pyx_n_s_out, __pyx_k_out, sizeof(__pyx_k_out), 0, 0, 1, 1}, {&__pyx_n_s_out_2, __pyx_k_out_2, sizeof(__pyx_k_out_2), 0, 0, 1, 1}, {&__pyx_n_s_out_array, __pyx_k_out_array, sizeof(__pyx_k_out_array), 0, 0, 1, 1}, {&__pyx_n_s_output, __pyx_k_output, sizeof(__pyx_k_output), 0, 0, 1, 1}, {&__pyx_n_s_output_2, __pyx_k_output_2, sizeof(__pyx_k_output_2), 0, 0, 1, 1}, {&__pyx_n_s_output_arr, __pyx_k_output_arr, sizeof(__pyx_k_output_arr), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pad, __pyx_k_pad, sizeof(__pyx_k_pad), 0, 0, 1, 1}, {&__pyx_n_s_padded, __pyx_k_padded, sizeof(__pyx_k_padded), 0, 0, 1, 1}, {&__pyx_n_s_period, __pyx_k_period, sizeof(__pyx_k_period), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_pointer, __pyx_k_pointer, sizeof(__pyx_k_pointer), 0, 0, 1, 1}, {&__pyx_n_s_position_encode, __pyx_k_position_encode, sizeof(__pyx_k_position_encode), 0, 0, 1, 1}, {&__pyx_n_s_positions, __pyx_k_positions, sizeof(__pyx_k_positions), 0, 0, 1, 1}, {&__pyx_n_s_prepare, __pyx_k_prepare, sizeof(__pyx_k_prepare), 0, 0, 1, 1}, {&__pyx_n_s_prev, __pyx_k_prev, sizeof(__pyx_k_prev), 0, 0, 1, 1}, {&__pyx_n_s_prod, __pyx_k_prod, sizeof(__pyx_k_prod), 0, 0, 1, 1}, {&__pyx_n_s_py, __pyx_k_py, sizeof(__pyx_k_py), 0, 0, 1, 1}, {&__pyx_n_s_py_best, __pyx_k_py_best, sizeof(__pyx_k_py_best), 0, 0, 1, 1}, {&__pyx_n_s_py_cands, __pyx_k_py_cands, sizeof(__pyx_k_py_cands), 0, 0, 1, 1}, {&__pyx_n_s_py_which, __pyx_k_py_which, sizeof(__pyx_k_py_which), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, 
{&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_qualname, __pyx_k_qualname, sizeof(__pyx_k_qualname), 0, 0, 1, 1}, {&__pyx_n_s_random, __pyx_k_random, sizeof(__pyx_k_random), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_relu, __pyx_k_relu, sizeof(__pyx_k_relu), 0, 0, 1, 1}, {&__pyx_n_s_remap_ids, __pyx_k_remap_ids, sizeof(__pyx_k_remap_ids), 0, 0, 1, 1}, {&__pyx_n_s_reshape, __pyx_k_reshape, sizeof(__pyx_k_reshape), 0, 0, 1, 1}, {&__pyx_n_s_result, __pyx_k_result, sizeof(__pyx_k_result), 0, 0, 1, 1}, {&__pyx_n_s_reverse, __pyx_k_reverse, sizeof(__pyx_k_reverse), 0, 0, 1, 1}, {&__pyx_n_s_scale, __pyx_k_scale, sizeof(__pyx_k_scale), 0, 0, 1, 1}, {&__pyx_n_s_scatter_add, __pyx_k_scatter_add, sizeof(__pyx_k_scatter_add), 0, 0, 1, 1}, {&__pyx_n_s_seed, __pyx_k_seed, sizeof(__pyx_k_seed), 0, 0, 1, 1}, {&__pyx_n_s_self, __pyx_k_self, sizeof(__pyx_k_self), 0, 0, 1, 1}, {&__pyx_n_s_selu, __pyx_k_selu, sizeof(__pyx_k_selu), 0, 0, 1, 1}, {&__pyx_n_s_seq, __pyx_k_seq, sizeof(__pyx_k_seq), 0, 0, 1, 1}, {&__pyx_n_s_seq2col, __pyx_k_seq2col, sizeof(__pyx_k_seq2col), 0, 0, 1, 1}, {&__pyx_n_s_seqs, __pyx_k_seqs, sizeof(__pyx_k_seqs), 0, 0, 1, 1}, {&__pyx_n_s_seqs_i, __pyx_k_seqs_i, sizeof(__pyx_k_seqs_i), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_shifted, __pyx_k_shifted, sizeof(__pyx_k_shifted), 0, 0, 1, 1}, {&__pyx_n_s_sigmoid, __pyx_k_sigmoid, sizeof(__pyx_k_sigmoid), 0, 0, 1, 1}, {&__pyx_n_s_signal, __pyx_k_signal, sizeof(__pyx_k_signal), 0, 0, 1, 1}, {&__pyx_n_s_signal_in, __pyx_k_signal_in, sizeof(__pyx_k_signal_in), 0, 0, 1, 1}, {&__pyx_n_s_signal_in_2, __pyx_k_signal_in_2, sizeof(__pyx_k_signal_in_2), 0, 0, 1, 1}, {&__pyx_n_s_signal_out, __pyx_k_signal_out, sizeof(__pyx_k_signal_out), 0, 0, 1, 1}, {&__pyx_n_s_signal_out_2, __pyx_k_signal_out_2, sizeof(__pyx_k_signal_out_2), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_softmax, __pyx_k_softmax, sizeof(__pyx_k_softmax), 0, 0, 1, 1}, {&__pyx_n_s_softmax_sequences, __pyx_k_softmax_sequences, sizeof(__pyx_k_softmax_sequences), 0, 0, 1, 1}, {&__pyx_n_s_softplus, __pyx_k_softplus, sizeof(__pyx_k_softplus), 0, 0, 1, 1}, {&__pyx_n_s_sort, __pyx_k_sort, sizeof(__pyx_k_sort), 0, 0, 1, 1}, {&__pyx_n_s_sqrt, __pyx_k_sqrt, sizeof(__pyx_k_sqrt), 0, 0, 1, 1}, {&__pyx_n_s_square_sequences, __pyx_k_square_sequences, sizeof(__pyx_k_square_sequences), 0, 0, 1, 1}, {&__pyx_n_s_src, __pyx_k_src, sizeof(__pyx_k_src), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_starts, __pyx_k_starts, sizeof(__pyx_k_starts), 0, 0, 1, 1}, {&__pyx_n_s_starts_2, __pyx_k_starts_2, sizeof(__pyx_k_starts_2), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_n_s_stride, __pyx_k_stride, sizeof(__pyx_k_stride), 0, 0, 1, 1}, 
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_sum, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1}, {&__pyx_n_s_sum_pool, __pyx_k_sum_pool, sizeof(__pyx_k_sum_pool), 0, 0, 1, 1}, {&__pyx_n_s_sumdx, __pyx_k_sumdx, sizeof(__pyx_k_sumdx), 0, 0, 1, 1}, {&__pyx_n_s_summed, __pyx_k_summed, sizeof(__pyx_k_summed), 0, 0, 1, 1}, {&__pyx_n_s_sums, __pyx_k_sums, sizeof(__pyx_k_sums), 0, 0, 1, 1}, {&__pyx_n_s_t, __pyx_k_t, sizeof(__pyx_k_t), 0, 0, 1, 1}, {&__pyx_n_s_take_which, __pyx_k_take_which, sizeof(__pyx_k_take_which), 0, 0, 1, 1}, {&__pyx_n_s_tanh, __pyx_k_tanh, sizeof(__pyx_k_tanh), 0, 0, 1, 1}, {&__pyx_n_s_tensordot, __pyx_k_tensordot, sizeof(__pyx_k_tensordot), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_thinc_neural_ops, __pyx_k_thinc_neural_ops, sizeof(__pyx_k_thinc_neural_ops), 0, 0, 1, 1}, {&__pyx_n_s_threshold, __pyx_k_threshold, sizeof(__pyx_k_threshold), 0, 0, 1, 1}, {&__pyx_n_s_timestep, __pyx_k_timestep, sizeof(__pyx_k_timestep), 0, 0, 1, 1}, {&__pyx_n_s_to_add, __pyx_k_to_add, sizeof(__pyx_k_to_add), 0, 0, 1, 1}, {&__pyx_n_s_to_add_2, __pyx_k_to_add_2, sizeof(__pyx_k_to_add_2), 0, 0, 1, 1}, {&__pyx_n_s_to_sum, __pyx_k_to_sum, sizeof(__pyx_k_to_sum), 0, 0, 1, 1}, {&__pyx_n_s_trans1, __pyx_k_trans1, sizeof(__pyx_k_trans1), 0, 0, 1, 1}, {&__pyx_n_s_trans2, __pyx_k_trans2, sizeof(__pyx_k_trans2), 0, 0, 1, 1}, {&__pyx_n_s_transpose, __pyx_k_transpose, sizeof(__pyx_k_transpose), 0, 0, 1, 1}, {&__pyx_n_s_uint32, __pyx_k_uint32, sizeof(__pyx_k_uint32), 0, 0, 1, 1}, {&__pyx_n_s_uint64, __pyx_k_uint64, sizeof(__pyx_k_uint64), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unflat, __pyx_k_unflat, sizeof(__pyx_k_unflat), 0, 0, 1, 1}, {&__pyx_n_s_unflatten, __pyx_k_unflatten, sizeof(__pyx_k_unflatten), 0, 0, 1, 1}, {&__pyx_n_s_uniform, __pyx_k_uniform, sizeof(__pyx_k_uniform), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_unpad, __pyx_k_unpad, sizeof(__pyx_k_unpad), 0, 0, 1, 1}, {&__pyx_n_s_unpadded, __pyx_k_unpadded, sizeof(__pyx_k_unpadded), 0, 0, 1, 1}, {&__pyx_n_s_unzip, __pyx_k_unzip, sizeof(__pyx_k_unzip), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_update_averages, __pyx_k_update_averages, sizeof(__pyx_k_update_averages), 0, 0, 1, 1}, {&__pyx_n_s_util, __pyx_k_util, sizeof(__pyx_k_util), 0, 0, 1, 1}, {&__pyx_n_s_value, __pyx_k_value, sizeof(__pyx_k_value), 0, 0, 1, 1}, {&__pyx_n_s_variance, __pyx_k_variance, sizeof(__pyx_k_variance), 0, 0, 1, 1}, {&__pyx_n_s_weights, __pyx_k_weights, sizeof(__pyx_k_weights), 0, 0, 1, 1}, 
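/* Each entry above and below is a __Pyx_StringTabEntry: the address of the module-level
   PyObject* slot (e.g. __pyx_n_s_where), the C character array holding the text, its
   sizeof, and four trailing fields: encoding (0 = default), is_unicode, is_str, intern.
   The name prefixes reflect those flags: __pyx_n_s_* entries are interned identifier
   strings (0, 0, 1, 1), __pyx_kp_s_* are non-interned str constants (0, 0, 1, 0), and
   __pyx_kp_u_* are unicode constants (0, 1, 0, 0).  The table ends with an all-zero
   sentinel and is walked once during module initialization to create the corresponding
   Python string objects. */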
{&__pyx_n_s_where, __pyx_k_where, sizeof(__pyx_k_where), 0, 0, 1, 1}, {&__pyx_n_s_which, __pyx_k_which, sizeof(__pyx_k_which), 0, 0, 1, 1}, {&__pyx_n_s_which__bo, __pyx_k_which__bo, sizeof(__pyx_k_which__bo), 0, 0, 1, 1}, {&__pyx_n_s_whole_array, __pyx_k_whole_array, sizeof(__pyx_k_whole_array), 0, 0, 1, 1}, {&__pyx_n_s_workon, __pyx_k_workon, sizeof(__pyx_k_workon), 0, 0, 1, 1}, {&__pyx_n_s_wrap_backprop, __pyx_k_wrap_backprop, sizeof(__pyx_k_wrap_backprop), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_xavier_uniform_init, __pyx_k_xavier_uniform_init, sizeof(__pyx_k_xavier_uniform_init), 0, 0, 1, 1}, {&__pyx_n_s_xp, __pyx_k_xp, sizeof(__pyx_k_xp), 0, 0, 1, 1}, {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, {&__pyx_n_s_y_pred, __pyx_k_y_pred, sizeof(__pyx_k_y_pred), 0, 0, 1, 1}, {&__pyx_n_s_y_true, __pyx_k_y_true, sizeof(__pyx_k_y_true), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {&__pyx_n_s_zeros_aligned, __pyx_k_zeros_aligned, sizeof(__pyx_k_zeros_aligned), 0, 0, 1, 1}, {&__pyx_n_s_zip, __pyx_k_zip, sizeof(__pyx_k_zip), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(0, 19, __pyx_L1_error) __pyx_builtin_object = __Pyx_GetBuiltinName(__pyx_n_s_object); if (!__pyx_builtin_object) __PYX_ERR(0, 57, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(0, 111, __pyx_L1_error) __pyx_builtin_max = __Pyx_GetBuiltinName(__pyx_n_s_max); if (!__pyx_builtin_max) __PYX_ERR(0, 181, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 185, __pyx_L1_error) __pyx_builtin_zip = __Pyx_GetBuiltinName(__pyx_n_s_zip); if (!__pyx_builtin_zip) __PYX_ERR(0, 223, __pyx_L1_error) __pyx_builtin_NotImplementedError = __Pyx_GetBuiltinName(__pyx_n_s_NotImplementedError); if (!__pyx_builtin_NotImplementedError) __PYX_ERR(0, 290, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 272, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 855, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(3, 148, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(3, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(3, 404, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(3, 613, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(3, 832, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "thinc/neural/ops.pyx":77 * # Copy left contexts. The last words aren't the left-context for anything. 
* cols[nW:, :nW] = seq[:-nW].reshape((-1, nW, I)) * cols[:, nW] = seq # <<<<<<<<<<<<<< * cols[:-nW, nW+1:] = seq[nW:].reshape((-1, nW, I)) * return cols.reshape((B, I * (2*nW+1))) */ __pyx_slice__3 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); /* "thinc/neural/ops.pyx":100 * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): # <<<<<<<<<<<<<< * masked = [] * for i, mask in enumerate(masks): */ __pyx_tuple__5 = PyTuple_Pack(6, __pyx_n_s_gradient, __pyx_n_s_args, __pyx_n_s_kwargs, __pyx_n_s_masked, __pyx_n_s_i, __pyx_n_s_mask); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 100, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); __pyx_codeobj__6 = (PyObject*)__Pyx_PyCode_New(1, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARARGS|CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__5, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_finish_update, 100, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__6)) __PYX_ERR(0, 100, __pyx_L1_error) /* "thinc/neural/ops.pyx":99 * return X, lambda func: func * masks = [self.get_dropout_mask(x.shape, dropout) for x in X] * def wrap_backprop(backprop): # <<<<<<<<<<<<<< * def finish_update(gradient, *args, **kwargs): * masked = [] */ __pyx_tuple__8 = PyTuple_Pack(3, __pyx_n_s_backprop, __pyx_n_s_finish_update, __pyx_n_s_finish_update); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(0, 99, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); __pyx_codeobj__9 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__8, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_wrap_backprop, 99, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__9)) __PYX_ERR(0, 99, __pyx_L1_error) /* "thinc/neural/ops.pyx":127 * return x, lambda func: func * def wrap_backprop(backprop): * def finish_update(gradient, *args, **kwargs): # <<<<<<<<<<<<<< * return backprop(gradient * mask, *args, **kwargs) * return finish_update */ __pyx_tuple__10 = PyTuple_Pack(3, __pyx_n_s_gradient, __pyx_n_s_args, __pyx_n_s_kwargs); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 127, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); __pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARARGS|CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_finish_update, 127, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) __PYX_ERR(0, 127, __pyx_L1_error) /* "thinc/neural/ops.pyx":126 * if mask is None: * return x, lambda func: func * def wrap_backprop(backprop): # <<<<<<<<<<<<<< * def finish_update(gradient, *args, **kwargs): * return backprop(gradient * mask, *args, **kwargs) */ __pyx_tuple__13 = PyTuple_Pack(3, __pyx_n_s_backprop, __pyx_n_s_finish_update, __pyx_n_s_finish_update); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 126, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_wrap_backprop, 126, 
__pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 126, __pyx_L1_error) /* "thinc/neural/ops.pyx":138 * def flatten(self, X, dtype=None, pad=0): * if X is None or len(X) == 0: * return self.allocate((0,), dtype=dtype or 'f') # <<<<<<<<<<<<<< * xp = get_array_module(X[0]) * X = [x for x in X if x.size != 0] */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_int_0); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(0, 138, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); __pyx_tuple__17 = PyTuple_Pack(1, __pyx_tuple__16); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(0, 138, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "thinc/neural/ops.pyx":145 * for x in X: * padded.append( * xp.zeros((pad,) + x.shape[1:], dtype=x.dtype)) # <<<<<<<<<<<<<< * padded.append(x) * padded.append( */ __pyx_slice__18 = PySlice_New(__pyx_int_1, Py_None, Py_None); if (unlikely(!__pyx_slice__18)) __PYX_ERR(0, 145, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__18); __Pyx_GIVEREF(__pyx_slice__18); /* "thinc/neural/ops.pyx":199 * def unpad(padded): * unpadded = [None] * len(lengths) * padded = self.xp.ascontiguousarray(padded.transpose((1, 0) + extra_dims)) # <<<<<<<<<<<<<< * for i in range(padded.shape[0]): * unpadded[indices[i]] = padded[i, :lengths[i]] */ __pyx_tuple__20 = PyTuple_Pack(2, __pyx_int_1, __pyx_int_0); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); /* "thinc/neural/ops.pyx":197 * break * batch_size_at_t[t] = i * def unpad(padded): # <<<<<<<<<<<<<< * unpadded = [None] * len(lengths) * padded = self.xp.ascontiguousarray(padded.transpose((1, 0) + extra_dims)) */ __pyx_tuple__22 = PyTuple_Pack(3, __pyx_n_s_padded, __pyx_n_s_unpadded, __pyx_n_s_i); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(0, 197, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); __pyx_codeobj__23 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__22, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_unpad, 197, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__23)) __PYX_ERR(0, 197, __pyx_L1_error) /* "thinc/neural/ops.pyx":1091 * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, * learn_rate, mod_rate=1.): * cupy.ElementwiseKernel( # <<<<<<<<<<<<<< * 'T grad, T lr, T one_minus_beta1, T one_minus_beta2, T eps', * 'T param, T m, T v', */ __pyx_tuple__114 = PyTuple_Pack(4, __pyx_kp_s_T_grad_T_lr_T_one_minus_beta1_T, __pyx_kp_s_T_param_T_m_T_v, __pyx_kp_s_m_one_minus_beta1_grad_m_v_one_m, __pyx_n_s_adam); if (unlikely(!__pyx_tuple__114)) __PYX_ERR(0, 1091, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__114); __Pyx_GIVEREF(__pyx_tuple__114); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple__119 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__119)) __PYX_ERR(1, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__119); __Pyx_GIVEREF(__pyx_tuple__119); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((flags & 
pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__120 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__120)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__120); __Pyx_GIVEREF(__pyx_tuple__120); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":306 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__121 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__121)) __PYX_ERR(1, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__121); __Pyx_GIVEREF(__pyx_tuple__121); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__122 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__122)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__122); __Pyx_GIVEREF(__pyx_tuple__122); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":879 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__123 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__123)) __PYX_ERR(1, 879, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__123); __Pyx_GIVEREF(__pyx_tuple__123); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1037 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_tuple__124 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__124)) __PYX_ERR(1, 1037, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__124); __Pyx_GIVEREF(__pyx_tuple__124); /* "../../../../opt/_internal/cpython-3.6.10/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1043 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_tuple__125 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__125)) __PYX_ERR(1, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__125); __Pyx_GIVEREF(__pyx_tuple__125); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__126 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__126)) __PYX_ERR(3, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__126); __Pyx_GIVEREF(__pyx_tuple__126); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ 
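/* The constants cached by __Pyx_InitCachedConstants fall into a few groups: slices and
   small tuples used inside the ops.pyx methods (for example __pyx_slice__3 for the full
   ':' index in cols[:, nW], and __pyx_tuple__20 for the (1, 0) transpose axes), name
   tuples plus code objects for the nested closures finish_update, wrap_backprop and
   unpad, the argument pack __pyx_tuple__114 for the cupy.ElementwiseKernel call at
   ops.pyx:1091, and one-element tuples such as the ones here that hold the constant
   message for each raise in numpy/__init__.pxd and View.MemoryView.  Packing the
   exception arguments once at import time lets each raise site call the exception type
   with a ready-made argument tuple instead of rebuilding it on every failure. */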
__pyx_tuple__127 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__127)) __PYX_ERR(3, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__127); __Pyx_GIVEREF(__pyx_tuple__127); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__128 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__128)) __PYX_ERR(3, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__128); __Pyx_GIVEREF(__pyx_tuple__128); /* "View.MemoryView":176 * self.data = malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__129 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__129)) __PYX_ERR(3, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__129); __Pyx_GIVEREF(__pyx_tuple__129); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__130 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__130)) __PYX_ERR(3, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__130); __Pyx_GIVEREF(__pyx_tuple__130); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__131 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__131)) __PYX_ERR(3, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__131); __Pyx_GIVEREF(__pyx_tuple__131); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__132 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__132)) __PYX_ERR(3, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__132); __Pyx_GIVEREF(__pyx_tuple__132); /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__133 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__133)) __PYX_ERR(3, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__133); __Pyx_GIVEREF(__pyx_tuple__133); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__134 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__134)) __PYX_ERR(3, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__134); __Pyx_GIVEREF(__pyx_tuple__134); /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__135 = PyTuple_Pack(1, 
__pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__135)) __PYX_ERR(3, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__135); __Pyx_GIVEREF(__pyx_tuple__135); /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__136 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__136)) __PYX_ERR(3, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__136); __Pyx_GIVEREF(__pyx_tuple__136); /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__137 = PyTuple_New(1); if (unlikely(!__pyx_tuple__137)) __PYX_ERR(3, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__137); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__137, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__137); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__138 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__138)) __PYX_ERR(3, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__138); __Pyx_GIVEREF(__pyx_tuple__138); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__139 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__139)) __PYX_ERR(3, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__139); __Pyx_GIVEREF(__pyx_tuple__139); /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__140 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__140)) __PYX_ERR(3, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__140); __Pyx_GIVEREF(__pyx_tuple__140); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__141 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__141)) __PYX_ERR(3, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__141); __Pyx_GIVEREF(__pyx_tuple__141); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__142 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__142)) __PYX_ERR(3, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__142); __Pyx_GIVEREF(__pyx_tuple__142); /* "thinc/neural/ops.pyx":57 * * * class Ops(object): # <<<<<<<<<<<<<< * device = 'cpu' * xp = None */ __pyx_tuple__145 = PyTuple_Pack(1, __pyx_builtin_object); if (unlikely(!__pyx_tuple__145)) __PYX_ERR(0, 57, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_tuple__145); __Pyx_GIVEREF(__pyx_tuple__145); /* "thinc/neural/ops.pyx":61 * xp = None * * def __init__(self, xp=None): # <<<<<<<<<<<<<< * if xp is not None: * self.xp = xp */ __pyx_tuple__146 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_xp); if (unlikely(!__pyx_tuple__146)) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__146); __Pyx_GIVEREF(__pyx_tuple__146); __pyx_codeobj_ = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__146, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_init, 61, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj_)) __PYX_ERR(0, 61, __pyx_L1_error) __pyx_tuple__147 = PyTuple_Pack(1, ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__147)) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__147); __Pyx_GIVEREF(__pyx_tuple__147); /* "thinc/neural/ops.pyx":65 * self.xp = xp * * def seq2col(self, seq, int nW): # <<<<<<<<<<<<<< * '''Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence. * The new sequence is constructed by concatenating nW preceding and succeeding */ __pyx_tuple__148 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_seq, __pyx_n_s_nW, __pyx_n_s_B, __pyx_n_s_I, __pyx_n_s_cols); if (unlikely(!__pyx_tuple__148)) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__148); __Pyx_GIVEREF(__pyx_tuple__148); __pyx_codeobj__2 = (PyObject*)__Pyx_PyCode_New(3, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__148, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_seq2col, 65, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__2)) __PYX_ERR(0, 65, __pyx_L1_error) /* "thinc/neural/ops.pyx":81 * return cols.reshape((B, I * (2*nW+1))) * * def backprop_seq2col(self, dY, int nW): # <<<<<<<<<<<<<< * # This is a test implementation that only supports nW=1 * assert nW == 1 */ __pyx_tuple__149 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_dY, __pyx_n_s_nW, __pyx_n_s_nF, __pyx_n_s_B, __pyx_n_s_I, __pyx_n_s_dX); if (unlikely(!__pyx_tuple__149)) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__149); __Pyx_GIVEREF(__pyx_tuple__149); __pyx_codeobj__4 = (PyObject*)__Pyx_PyCode_New(3, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__149, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_seq2col, 81, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__4)) __PYX_ERR(0, 81, __pyx_L1_error) /* "thinc/neural/ops.pyx":95 * return dX * * def dropout_sequences(self, X, dropout, inplace=False): # <<<<<<<<<<<<<< * if dropout is None or dropout <= 0.0: * return X, lambda func: func */ __pyx_tuple__150 = PyTuple_Pack(11, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_dropout, __pyx_n_s_inplace, __pyx_n_s_masks, __pyx_n_s_wrap_backprop, __pyx_n_s_wrap_backprop, __pyx_n_s_i, __pyx_n_s_mask, __pyx_n_s_masked, __pyx_n_s_x); if (unlikely(!__pyx_tuple__150)) __PYX_ERR(0, 95, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__150); __Pyx_GIVEREF(__pyx_tuple__150); __pyx_codeobj__7 = (PyObject*)__Pyx_PyCode_New(4, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__150, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_dropout_sequences, 95, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__7)) __PYX_ERR(0, 95, __pyx_L1_error) __pyx_tuple__151 = PyTuple_Pack(1, ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__151)) __PYX_ERR(0, 95, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__151); __Pyx_GIVEREF(__pyx_tuple__151); /* "thinc/neural/ops.pyx":120 * return masked, wrap_backprop * * def dropout(self, x, dropout, inplace=False): # <<<<<<<<<<<<<< * if dropout is None or dropout <= 0.0: * return x, lambda func: func */ __pyx_tuple__152 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_x, __pyx_n_s_dropout, __pyx_n_s_inplace, __pyx_n_s_mask, __pyx_n_s_wrap_backprop, __pyx_n_s_wrap_backprop); if (unlikely(!__pyx_tuple__152)) __PYX_ERR(0, 120, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__152); __Pyx_GIVEREF(__pyx_tuple__152); __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(4, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__152, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_dropout, 120, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) __PYX_ERR(0, 120, __pyx_L1_error) __pyx_tuple__153 = PyTuple_Pack(1, ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__153)) __PYX_ERR(0, 120, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__153); __Pyx_GIVEREF(__pyx_tuple__153); /* "thinc/neural/ops.pyx":136 * return x * mask, wrap_backprop * * def flatten(self, X, dtype=None, pad=0): # <<<<<<<<<<<<<< * if X is None or len(X) == 0: * return self.allocate((0,), dtype=dtype or 'f') */ __pyx_tuple__154 = PyTuple_Pack(8, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_dtype, __pyx_n_s_pad, __pyx_n_s_xp, __pyx_n_s_padded, __pyx_n_s_x, __pyx_n_s_result); if (unlikely(!__pyx_tuple__154)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__154); __Pyx_GIVEREF(__pyx_tuple__154); __pyx_codeobj__15 = (PyObject*)__Pyx_PyCode_New(4, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__154, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_flatten, 136, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__15)) __PYX_ERR(0, 136, __pyx_L1_error) __pyx_tuple__155 = PyTuple_Pack(2, ((PyObject *)Py_None), ((PyObject *)__pyx_int_0)); if (unlikely(!__pyx_tuple__155)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__155); __Pyx_GIVEREF(__pyx_tuple__155); /* "thinc/neural/ops.pyx":155 * return result * * def unflatten(self, X, lengths, pad=0): # <<<<<<<<<<<<<< * unflat = [] * pad = int(pad) */ __pyx_tuple__156 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_lengths, __pyx_n_s_pad, __pyx_n_s_unflat, __pyx_n_s_length); if (unlikely(!__pyx_tuple__156)) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__156); __Pyx_GIVEREF(__pyx_tuple__156); __pyx_codeobj__19 = (PyObject*)__Pyx_PyCode_New(4, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__156, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_unflatten, 155, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__19)) __PYX_ERR(0, 155, __pyx_L1_error) __pyx_tuple__157 = PyTuple_Pack(1, ((PyObject *)__pyx_int_0)); if (unlikely(!__pyx_tuple__157)) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__157); __Pyx_GIVEREF(__pyx_tuple__157); /* "thinc/neural/ops.pyx":170 * return unflat * * def square_sequences(self, seqs): # <<<<<<<<<<<<<< * '''Sort a batch of sequence by decreasing length, pad, and transpose * so that the outer dimension is the timestep. 
Return the padded batch, */ __pyx_tuple__158 = PyTuple_Pack(18, __pyx_n_s_self, __pyx_n_s_seqs, __pyx_n_s_lengths_indices, __pyx_n_s_indices, __pyx_n_s_lengths, __pyx_n_s_nB, __pyx_n_s_nS, __pyx_n_s_arr, __pyx_n_s_arr_i, __pyx_n_s_length, __pyx_n_s_seqs_i, __pyx_n_s_extra_dims, __pyx_n_s_batch_size_at_t, __pyx_n_s_i, __pyx_n_s_t, __pyx_n_s_unpad, __pyx_n_s_unpad, __pyx_n_s_seq); if (unlikely(!__pyx_tuple__158)) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__158); __Pyx_GIVEREF(__pyx_tuple__158); __pyx_codeobj__21 = (PyObject*)__Pyx_PyCode_New(2, 0, 18, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__158, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_square_sequences, 170, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__21)) __PYX_ERR(0, 170, __pyx_L1_error) /* "thinc/neural/ops.pyx":207 * @cython.boundscheck(False) * @cython.wraparound(False) * def get_dropout_mask(self, shape, drop): # <<<<<<<<<<<<<< * if drop is None or drop <= 0: * return None */ __pyx_tuple__159 = PyTuple_Pack(5, __pyx_n_s_self, __pyx_n_s_shape, __pyx_n_s_drop, __pyx_n_s_coinflips, __pyx_n_s_mask); if (unlikely(!__pyx_tuple__159)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__159); __Pyx_GIVEREF(__pyx_tuple__159); __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__159, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_get_dropout_mask, 207, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 207, __pyx_L1_error) /* "thinc/neural/ops.pyx":216 * return self.asarray(mask, dtype='float32') * * def allocate(self, shape, dtype='float32'): # <<<<<<<<<<<<<< * if isinstance(shape, integer_types): * shape = (shape,) */ __pyx_tuple__160 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_shape, __pyx_n_s_dtype, __pyx_n_s_nr_weight); if (unlikely(!__pyx_tuple__160)) __PYX_ERR(0, 216, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__160); __Pyx_GIVEREF(__pyx_tuple__160); __pyx_codeobj__25 = (PyObject*)__Pyx_PyCode_New(3, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__160, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_allocate, 216, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__25)) __PYX_ERR(0, 216, __pyx_L1_error) __pyx_tuple__161 = PyTuple_Pack(1, ((PyObject*)__pyx_n_s_float32)); if (unlikely(!__pyx_tuple__161)) __PYX_ERR(0, 216, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__161); __Pyx_GIVEREF(__pyx_tuple__161); /* "thinc/neural/ops.pyx":222 * return self.xp.zeros(shape, dtype=dtype) * * def unzip(self, data): # <<<<<<<<<<<<<< * X, y = zip(*data) * return self.asarray(X), self.asarray(y) */ __pyx_tuple__162 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_data, __pyx_n_s_X, __pyx_n_s_y); if (unlikely(!__pyx_tuple__162)) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__162); __Pyx_GIVEREF(__pyx_tuple__162); __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(2, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__162, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_unzip, 222, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(0, 222, __pyx_L1_error) /* "thinc/neural/ops.pyx":226 * return self.asarray(X), self.asarray(y) * * def asarray(self, data, dtype=None): # <<<<<<<<<<<<<< * if isinstance(data, self.xp.ndarray): * if dtype is not None: */ 
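/* From this point the cached constants follow one pattern per Python-level method of the
   Ops class echoed in the comments: a tuple of the method's argument and local variable
   names, a code object created with __Pyx_PyCode_New that records ops.pyx and the
   original source line (so tracebacks and introspection point back at the .pyx file),
   and, for signatures with defaults, a small tuple such as (None,) or (False,) holding
   the default values.  The function objects built later, when the Ops class body is
   executed during module initialization, reuse these cached objects rather than
   recreating them. */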
__pyx_tuple__163 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_data, __pyx_n_s_dtype); if (unlikely(!__pyx_tuple__163)) __PYX_ERR(0, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__163); __Pyx_GIVEREF(__pyx_tuple__163); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__163, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_asarray, 226, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(0, 226, __pyx_L1_error) __pyx_tuple__164 = PyTuple_Pack(1, ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__164)) __PYX_ERR(0, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__164); __Pyx_GIVEREF(__pyx_tuple__164); /* "thinc/neural/ops.pyx":240 * return self.xp.array(data) * * def batch_dot(self, x, y, transpose=False): # <<<<<<<<<<<<<< * # TODO: Fix this confusing inversion =/ * if not transpose: */ __pyx_tuple__165 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_transpose); if (unlikely(!__pyx_tuple__165)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__165); __Pyx_GIVEREF(__pyx_tuple__165); __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__165, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_batch_dot, 240, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) __PYX_ERR(0, 240, __pyx_L1_error) __pyx_tuple__166 = PyTuple_Pack(1, ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__166)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__166); __Pyx_GIVEREF(__pyx_tuple__166); /* "thinc/neural/ops.pyx":247 * return self.xp.dot(x, y) * * def add_batch_outer(self, output, x, y): # <<<<<<<<<<<<<< * # TODO: Deprecate this * output += self.xp.tensordot(x, y, axes=[[0], [0]]) */ __pyx_tuple__167 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_output, __pyx_n_s_x, __pyx_n_s_y); if (unlikely(!__pyx_tuple__167)) __PYX_ERR(0, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__167); __Pyx_GIVEREF(__pyx_tuple__167); __pyx_codeobj__29 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__167, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_add_batch_outer, 247, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__29)) __PYX_ERR(0, 247, __pyx_L1_error) /* "thinc/neural/ops.pyx":251 * output += self.xp.tensordot(x, y, axes=[[0], [0]]) * * def norm(self, x): # <<<<<<<<<<<<<< * return self.xp.sqrt((x * x).sum()) * */ __pyx_tuple__168 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_x); if (unlikely(!__pyx_tuple__168)) __PYX_ERR(0, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__168); __Pyx_GIVEREF(__pyx_tuple__168); __pyx_codeobj__30 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__168, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_norm, 251, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__30)) __PYX_ERR(0, 251, __pyx_L1_error) /* "thinc/neural/ops.pyx":254 * return self.xp.sqrt((x * x).sum()) * * def dot(self, x, y): # <<<<<<<<<<<<<< * # TODO: Deprecate this * return self.xp.dot(x, y) */ __pyx_tuple__169 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_x, __pyx_n_s_y); if (unlikely(!__pyx_tuple__169)) __PYX_ERR(0, 254, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__169); __Pyx_GIVEREF(__pyx_tuple__169); __pyx_codeobj__31 = 
(PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__169, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_dot, 254, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__31)) __PYX_ERR(0, 254, __pyx_L1_error) /* "thinc/neural/ops.pyx":258 * return self.xp.dot(x, y) * * def affine(self, weights, bias, signal): # <<<<<<<<<<<<<< * return self.gemm(signal, weights, trans2=True) + bias * */ __pyx_tuple__170 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_weights, __pyx_n_s_bias, __pyx_n_s_signal); if (unlikely(!__pyx_tuple__170)) __PYX_ERR(0, 258, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__170); __Pyx_GIVEREF(__pyx_tuple__170); __pyx_codeobj__32 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__170, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_affine, 258, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__32)) __PYX_ERR(0, 258, __pyx_L1_error) /* "thinc/neural/ops.pyx":261 * return self.gemm(signal, weights, trans2=True) + bias * * def add_sum(self, out, to_sum): # <<<<<<<<<<<<<< * out += to_sum.sum(axis=0) * */ __pyx_tuple__171 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_out, __pyx_n_s_to_sum); if (unlikely(!__pyx_tuple__171)) __PYX_ERR(0, 261, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__171); __Pyx_GIVEREF(__pyx_tuple__171); __pyx_codeobj__33 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__171, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_add_sum, 261, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__33)) __PYX_ERR(0, 261, __pyx_L1_error) /* "thinc/neural/ops.pyx":264 * out += to_sum.sum(axis=0) * * def argmax(self, x, axis=-1): # <<<<<<<<<<<<<< * return self.xp.argmax(x, axis=axis) * */ __pyx_tuple__172 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_x, __pyx_n_s_axis); if (unlikely(!__pyx_tuple__172)) __PYX_ERR(0, 264, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__172); __Pyx_GIVEREF(__pyx_tuple__172); __pyx_codeobj__34 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__172, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_argmax, 264, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__34)) __PYX_ERR(0, 264, __pyx_L1_error) __pyx_tuple__173 = PyTuple_Pack(1, ((PyObject *)__pyx_int_neg_1)); if (unlikely(!__pyx_tuple__173)) __PYX_ERR(0, 264, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__173); __Pyx_GIVEREF(__pyx_tuple__173); /* "thinc/neural/ops.pyx":267 * return self.xp.argmax(x, axis=axis) * * def sigmoid(self, X): # <<<<<<<<<<<<<< * return 1./(1. + self.xp.exp(-X)) * */ __pyx_tuple__174 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_X); if (unlikely(!__pyx_tuple__174)) __PYX_ERR(0, 267, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__174); __Pyx_GIVEREF(__pyx_tuple__174); __pyx_codeobj__35 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__174, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_sigmoid, 267, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__35)) __PYX_ERR(0, 267, __pyx_L1_error) /* "thinc/neural/ops.pyx":270 * return 1./(1. 
+ self.xp.exp(-X)) * * def dsigmoid(self, y): # <<<<<<<<<<<<<< * return y*(1-y) * */ __pyx_tuple__175 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_y); if (unlikely(!__pyx_tuple__175)) __PYX_ERR(0, 270, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__175); __Pyx_GIVEREF(__pyx_tuple__175); __pyx_codeobj__36 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__175, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_dsigmoid, 270, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__36)) __PYX_ERR(0, 270, __pyx_L1_error) /* "thinc/neural/ops.pyx":273 * return y*(1-y) * * def dtanh(self, y): # <<<<<<<<<<<<<< * return 1-y**2 * */ __pyx_tuple__176 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_y); if (unlikely(!__pyx_tuple__176)) __PYX_ERR(0, 273, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__176); __Pyx_GIVEREF(__pyx_tuple__176); __pyx_codeobj__37 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__176, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_dtanh, 273, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__37)) __PYX_ERR(0, 273, __pyx_L1_error) /* "thinc/neural/ops.pyx":276 * return 1-y**2 * * def softmax(self, x, inplace=False, axis=-1): # <<<<<<<<<<<<<< * shape = x.shape * maxes = self.xp.max(x, axis=axis, keepdims=True) */ __pyx_tuple__177 = PyTuple_Pack(8, __pyx_n_s_self, __pyx_n_s_x, __pyx_n_s_inplace, __pyx_n_s_axis, __pyx_n_s_shape, __pyx_n_s_maxes, __pyx_n_s_shifted, __pyx_n_s_new_x); if (unlikely(!__pyx_tuple__177)) __PYX_ERR(0, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__177); __Pyx_GIVEREF(__pyx_tuple__177); __pyx_codeobj__38 = (PyObject*)__Pyx_PyCode_New(4, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__177, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_softmax, 276, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__38)) __PYX_ERR(0, 276, __pyx_L1_error) __pyx_tuple__178 = PyTuple_Pack(2, ((PyObject *)Py_False), ((PyObject *)__pyx_int_neg_1)); if (unlikely(!__pyx_tuple__178)) __PYX_ERR(0, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__178); __Pyx_GIVEREF(__pyx_tuple__178); /* "thinc/neural/ops.pyx":288 * return new_x * * def softmax_sequences(self, Xs, lengths, inplace=False, axis=-1): # <<<<<<<<<<<<<< * if Xs.ndim >= 3: * raise NotImplementedError( */ __pyx_tuple__179 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_Xs, __pyx_n_s_lengths, __pyx_n_s_inplace, __pyx_n_s_axis, __pyx_n_s_new_x, __pyx_n_s_summed); if (unlikely(!__pyx_tuple__179)) __PYX_ERR(0, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__179); __Pyx_GIVEREF(__pyx_tuple__179); __pyx_codeobj__39 = (PyObject*)__Pyx_PyCode_New(5, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__179, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_softmax_sequences, 288, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__39)) __PYX_ERR(0, 288, __pyx_L1_error) __pyx_tuple__180 = PyTuple_Pack(2, ((PyObject *)Py_False), ((PyObject *)__pyx_int_neg_1)); if (unlikely(!__pyx_tuple__180)) __PYX_ERR(0, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__180); __Pyx_GIVEREF(__pyx_tuple__180); /* "thinc/neural/ops.pyx":303 * return new_x * * def backprop_softmax(self, Y, dY, axis=-1): # <<<<<<<<<<<<<< * dX = Y * dY * dX -= Y * dX.sum(axis=axis, keepdims=True) */ __pyx_tuple__181 = PyTuple_Pack(5, __pyx_n_s_self, 
__pyx_n_s_Y, __pyx_n_s_dY, __pyx_n_s_axis, __pyx_n_s_dX); if (unlikely(!__pyx_tuple__181)) __PYX_ERR(0, 303, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__181); __Pyx_GIVEREF(__pyx_tuple__181); __pyx_codeobj__40 = (PyObject*)__Pyx_PyCode_New(4, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__181, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_softmax, 303, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__40)) __PYX_ERR(0, 303, __pyx_L1_error) __pyx_tuple__182 = PyTuple_Pack(1, ((PyObject *)__pyx_int_neg_1)); if (unlikely(!__pyx_tuple__182)) __PYX_ERR(0, 303, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__182); __Pyx_GIVEREF(__pyx_tuple__182); /* "thinc/neural/ops.pyx":308 * return dX * * def backprop_softmax_sequences(self, dy, y, lengths): # <<<<<<<<<<<<<< * dx = y * dy * sumdx = self.backprop_sum_pool(self.sum_pool(dx, lengths), lengths) */ __pyx_tuple__183 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_dy, __pyx_n_s_y, __pyx_n_s_lengths, __pyx_n_s_dx, __pyx_n_s_sumdx); if (unlikely(!__pyx_tuple__183)) __PYX_ERR(0, 308, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__183); __Pyx_GIVEREF(__pyx_tuple__183); __pyx_codeobj__41 = (PyObject*)__Pyx_PyCode_New(4, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__183, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_softmax_sequences, 308, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__41)) __PYX_ERR(0, 308, __pyx_L1_error) /* "thinc/neural/ops.pyx":314 * return dx * * def expand_dims(self, a, axis=-1): # <<<<<<<<<<<<<< * return self.xp.expand_dims(a, axis=axis) * */ __pyx_tuple__184 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_a, __pyx_n_s_axis); if (unlikely(!__pyx_tuple__184)) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__184); __Pyx_GIVEREF(__pyx_tuple__184); __pyx_codeobj__42 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__184, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_expand_dims, 314, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__42)) __PYX_ERR(0, 314, __pyx_L1_error) __pyx_tuple__185 = PyTuple_Pack(1, ((PyObject *)__pyx_int_neg_1)); if (unlikely(!__pyx_tuple__185)) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__185); __Pyx_GIVEREF(__pyx_tuple__185); /* "thinc/neural/ops.pyx":317 * return self.xp.expand_dims(a, axis=axis) * * def clip_low(self, x, value, inplace=False): # <<<<<<<<<<<<<< * if inplace: * return self.xp.maximum(x, value, out=x) */ __pyx_tuple__186 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_x, __pyx_n_s_value, __pyx_n_s_inplace); if (unlikely(!__pyx_tuple__186)) __PYX_ERR(0, 317, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__186); __Pyx_GIVEREF(__pyx_tuple__186); __pyx_codeobj__43 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__186, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_clip_low, 317, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__43)) __PYX_ERR(0, 317, __pyx_L1_error) __pyx_tuple__187 = PyTuple_Pack(1, ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__187)) __PYX_ERR(0, 317, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__187); __Pyx_GIVEREF(__pyx_tuple__187); /* "thinc/neural/ops.pyx":323 * return self.xp.maximum(x, value) * * def take_which(self, x, which, axis=-1): # <<<<<<<<<<<<<< * output = 
self.allocate(which.shape) * for i in range(x.shape[axis]): */ __pyx_tuple__188 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_x, __pyx_n_s_which, __pyx_n_s_axis, __pyx_n_s_output, __pyx_n_s_i); if (unlikely(!__pyx_tuple__188)) __PYX_ERR(0, 323, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__188); __Pyx_GIVEREF(__pyx_tuple__188); __pyx_codeobj__44 = (PyObject*)__Pyx_PyCode_New(4, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__188, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_take_which, 323, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__44)) __PYX_ERR(0, 323, __pyx_L1_error) __pyx_tuple__189 = PyTuple_Pack(1, ((PyObject *)__pyx_int_neg_1)); if (unlikely(!__pyx_tuple__189)) __PYX_ERR(0, 323, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__189); __Pyx_GIVEREF(__pyx_tuple__189); /* "thinc/neural/ops.pyx":329 * return output * * def backprop_take(self, dX__bo, which__bo, nP): # <<<<<<<<<<<<<< * dX__bop = self.allocate((dX__bo.shape[0], dX__bo.shape[1], nP)) * for i in range(nP): */ __pyx_tuple__190 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_dX__bo, __pyx_n_s_which__bo, __pyx_n_s_nP, __pyx_n_s_dX__bop, __pyx_n_s_i); if (unlikely(!__pyx_tuple__190)) __PYX_ERR(0, 329, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__190); __Pyx_GIVEREF(__pyx_tuple__190); __pyx_codeobj__45 = (PyObject*)__Pyx_PyCode_New(4, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__190, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_take, 329, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__45)) __PYX_ERR(0, 329, __pyx_L1_error) /* "thinc/neural/ops.pyx":335 * return dX__bop * * def lstm(self, output, cells, act_pieces, prev): # <<<<<<<<<<<<<< * hf, hi, ho, hc = act_pieces * hf[:] = self.sigmoid(hf) */ __pyx_tuple__191 = PyTuple_Pack(9, __pyx_n_s_self, __pyx_n_s_output, __pyx_n_s_cells, __pyx_n_s_act_pieces, __pyx_n_s_prev, __pyx_n_s_hf, __pyx_n_s_hi, __pyx_n_s_ho, __pyx_n_s_hc); if (unlikely(!__pyx_tuple__191)) __PYX_ERR(0, 335, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__191); __Pyx_GIVEREF(__pyx_tuple__191); __pyx_codeobj__46 = (PyObject*)__Pyx_PyCode_New(5, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__191, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_lstm, 335, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__46)) __PYX_ERR(0, 335, __pyx_L1_error) /* "thinc/neural/ops.pyx":345 * output[:] = self.xp.tanh(cells) * ho * * def backprop_lstm(self, d_cells, d_prev, d_gate_pieces, # <<<<<<<<<<<<<< * d_output, gate_pieces, cells, prev): * hf, hi, ho, hc = gate_pieces */ __pyx_tuple__192 = PyTuple_Pack(18, __pyx_n_s_self, __pyx_n_s_d_cells, __pyx_n_s_d_prev, __pyx_n_s_d_gate_pieces, __pyx_n_s_d_output, __pyx_n_s_gate_pieces, __pyx_n_s_cells, __pyx_n_s_prev, __pyx_n_s_hf, __pyx_n_s_hi, __pyx_n_s_ho, __pyx_n_s_hc, __pyx_n_s_dhf, __pyx_n_s_dhi, __pyx_n_s_dho, __pyx_n_s_dhc, __pyx_n_s_ct, __pyx_n_s_dc); if (unlikely(!__pyx_tuple__192)) __PYX_ERR(0, 345, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__192); __Pyx_GIVEREF(__pyx_tuple__192); __pyx_codeobj__47 = (PyObject*)__Pyx_PyCode_New(8, 0, 18, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__192, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_lstm, 345, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__47)) __PYX_ERR(0, 345, __pyx_L1_error) /* "thinc/neural/ops.pyx":366 * 
copy_array(d_cells, dc) * * def softplus(self, X, threshold=20., out=None): # <<<<<<<<<<<<<< * xp = get_array_module(X) * log1p_exp = xp.log1p(xp.exp(X)) */ __pyx_tuple__193 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_threshold, __pyx_n_s_out, __pyx_n_s_xp, __pyx_n_s_log1p_exp, __pyx_n_s_indices); if (unlikely(!__pyx_tuple__193)) __PYX_ERR(0, 366, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__193); __Pyx_GIVEREF(__pyx_tuple__193); __pyx_codeobj__48 = (PyObject*)__Pyx_PyCode_New(4, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__193, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_softplus, 366, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__48)) __PYX_ERR(0, 366, __pyx_L1_error) __pyx_tuple__194 = PyTuple_Pack(2, ((PyObject*)__pyx_float_20_), ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__194)) __PYX_ERR(0, 366, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__194); __Pyx_GIVEREF(__pyx_tuple__194); /* "thinc/neural/ops.pyx":377 * return out * * def backprop_softplus(self, dY, X, threshold=20, out=None): # <<<<<<<<<<<<<< * xp = get_array_module(X) * if out is None: */ __pyx_tuple__195 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_dY, __pyx_n_s_X, __pyx_n_s_threshold, __pyx_n_s_out, __pyx_n_s_xp, __pyx_n_s_indices); if (unlikely(!__pyx_tuple__195)) __PYX_ERR(0, 377, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__195); __Pyx_GIVEREF(__pyx_tuple__195); __pyx_codeobj__49 = (PyObject*)__Pyx_PyCode_New(5, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__195, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_softplus, 377, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__49)) __PYX_ERR(0, 377, __pyx_L1_error) __pyx_tuple__196 = PyTuple_Pack(2, ((PyObject *)__pyx_int_20), ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__196)) __PYX_ERR(0, 377, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__196); __Pyx_GIVEREF(__pyx_tuple__196); /* "thinc/neural/ops.pyx":387 * return out * * def mish(self, X, threshold=20., out=None): # <<<<<<<<<<<<<< * Xsoft = self.softplus(X, threshold=threshold, out=out) * Y = self.xp.tanh(Xsoft, out=out) */ __pyx_tuple__197 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_threshold, __pyx_n_s_out, __pyx_n_s_Xsoft, __pyx_n_s_Y); if (unlikely(!__pyx_tuple__197)) __PYX_ERR(0, 387, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__197); __Pyx_GIVEREF(__pyx_tuple__197); __pyx_codeobj__50 = (PyObject*)__Pyx_PyCode_New(4, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__197, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_mish, 387, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__50)) __PYX_ERR(0, 387, __pyx_L1_error) __pyx_tuple__198 = PyTuple_Pack(2, ((PyObject*)__pyx_float_20_), ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__198)) __PYX_ERR(0, 387, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__198); __Pyx_GIVEREF(__pyx_tuple__198); /* "thinc/neural/ops.pyx":393 * return Y * * def backprop_mish(self, dY, X, threshold=20, out=None): # <<<<<<<<<<<<<< * xp = get_array_module(X) * indices = X < threshold */ __pyx_tuple__199 = PyTuple_Pack(12, __pyx_n_s_self, __pyx_n_s_dY, __pyx_n_s_X, __pyx_n_s_threshold, __pyx_n_s_out, __pyx_n_s_xp, __pyx_n_s_indices, __pyx_n_s_Xsub, __pyx_n_s_dYsub, __pyx_n_s_omega, __pyx_n_s_delta_2, __pyx_n_s_dXsub); if (unlikely(!__pyx_tuple__199)) __PYX_ERR(0, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__199); 
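/* Default values in the echoed signatures are packed from pre-built numeric singletons
   defined elsewhere in this generated file: the float defaults threshold=20.,
   mod_rate=1. and max_decay=0.9999 use __pyx_float_20_, __pyx_float_1_ and
   __pyx_float_0_9999, while the integer default threshold=20 in backprop_softplus and
   backprop_mish uses __pyx_int_20.  Only the per-method defaults tuples are created
   here. */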
__Pyx_GIVEREF(__pyx_tuple__199); __pyx_codeobj__51 = (PyObject*)__Pyx_PyCode_New(5, 0, 12, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__199, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_mish, 393, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__51)) __PYX_ERR(0, 393, __pyx_L1_error) __pyx_tuple__200 = PyTuple_Pack(2, ((PyObject *)__pyx_int_20), ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__200)) __PYX_ERR(0, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__200); __Pyx_GIVEREF(__pyx_tuple__200); /* "thinc/neural/ops.pyx":412 * return out * * def xavier_uniform_init(self, W, inplace=True): # <<<<<<<<<<<<<< * if (W**2).sum() != 0.: * return W */ __pyx_tuple__201 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_W, __pyx_n_s_inplace, __pyx_n_s_scale); if (unlikely(!__pyx_tuple__201)) __PYX_ERR(0, 412, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__201); __Pyx_GIVEREF(__pyx_tuple__201); __pyx_codeobj__52 = (PyObject*)__Pyx_PyCode_New(3, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__201, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_xavier_uniform_init, 412, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__52)) __PYX_ERR(0, 412, __pyx_L1_error) __pyx_tuple__202 = PyTuple_Pack(1, ((PyObject *)Py_True)); if (unlikely(!__pyx_tuple__202)) __PYX_ERR(0, 412, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__202); __Pyx_GIVEREF(__pyx_tuple__202); /* "thinc/neural/ops.pyx":422 * return self.xp.random.uniform(-scale, scale, W.shape) * * def normal_init(self, W, fan_in, inplace=True): # <<<<<<<<<<<<<< * if (W**2).sum() != 0.: * return W */ __pyx_tuple__203 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_W, __pyx_n_s_fan_in, __pyx_n_s_inplace, __pyx_n_s_scale, __pyx_n_s_inits); if (unlikely(!__pyx_tuple__203)) __PYX_ERR(0, 422, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__203); __Pyx_GIVEREF(__pyx_tuple__203); __pyx_codeobj__53 = (PyObject*)__Pyx_PyCode_New(4, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__203, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_normal_init, 422, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__53)) __PYX_ERR(0, 422, __pyx_L1_error) __pyx_tuple__204 = PyTuple_Pack(1, ((PyObject *)Py_True)); if (unlikely(!__pyx_tuple__204)) __PYX_ERR(0, 422, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__204); __Pyx_GIVEREF(__pyx_tuple__204); /* "thinc/neural/ops.pyx":434 * return inits * * def he_normal_init(self, shape, fan_in): # <<<<<<<<<<<<<< * scale = self.xp.sqrt(2. 
/ fan_in) * return self.xp.random.normal(scale=scale, size=prod(shape)).reshape(shape) */ __pyx_tuple__205 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_shape, __pyx_n_s_fan_in, __pyx_n_s_scale); if (unlikely(!__pyx_tuple__205)) __PYX_ERR(0, 434, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__205); __Pyx_GIVEREF(__pyx_tuple__205); __pyx_codeobj__54 = (PyObject*)__Pyx_PyCode_New(3, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__205, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_he_normal_init, 434, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__54)) __PYX_ERR(0, 434, __pyx_L1_error) /* "thinc/neural/ops.pyx":438 * return self.xp.random.normal(scale=scale, size=prod(shape)).reshape(shape) * * def update_averages(self, ema, weights, t, max_decay=0.9999): # <<<<<<<<<<<<<< * cdef weight_t decay = (1.0 + t) / (10.0 + t) * if decay > max_decay: */ __pyx_tuple__206 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_ema, __pyx_n_s_weights, __pyx_n_s_t, __pyx_n_s_max_decay, __pyx_n_s_decay); if (unlikely(!__pyx_tuple__206)) __PYX_ERR(0, 438, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__206); __Pyx_GIVEREF(__pyx_tuple__206); __pyx_codeobj__55 = (PyObject*)__Pyx_PyCode_New(5, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__206, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_update_averages, 438, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__55)) __PYX_ERR(0, 438, __pyx_L1_error) __pyx_tuple__207 = PyTuple_Pack(1, ((PyObject*)__pyx_float_0_9999)); if (unlikely(!__pyx_tuple__207)) __PYX_ERR(0, 438, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__207); __Pyx_GIVEREF(__pyx_tuple__207); /* "thinc/neural/ops.pyx":444 * ema -= (1-decay) * (ema - weights) * * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, # <<<<<<<<<<<<<< * learn_rate, mod_rate=1.): * mom1 *= beta1 */ __pyx_tuple__208 = PyTuple_Pack(10, __pyx_n_s_self, __pyx_n_s_weights, __pyx_n_s_gradient, __pyx_n_s_mom1, __pyx_n_s_mom2, __pyx_n_s_beta1, __pyx_n_s_beta2, __pyx_n_s_eps, __pyx_n_s_learn_rate, __pyx_n_s_mod_rate); if (unlikely(!__pyx_tuple__208)) __PYX_ERR(0, 444, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__208); __Pyx_GIVEREF(__pyx_tuple__208); __pyx_codeobj__56 = (PyObject*)__Pyx_PyCode_New(10, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__208, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_adam, 444, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__56)) __PYX_ERR(0, 444, __pyx_L1_error) __pyx_tuple__209 = PyTuple_Pack(1, ((PyObject*)__pyx_float_1_)); if (unlikely(!__pyx_tuple__209)) __PYX_ERR(0, 444, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__209); __Pyx_GIVEREF(__pyx_tuple__209); /* "thinc/neural/ops.pyx":455 * gradient.fill(0) * * def clip_gradient(self, gradient, threshold): # <<<<<<<<<<<<<< * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) */ __pyx_tuple__210 = PyTuple_Pack(5, __pyx_n_s_self, __pyx_n_s_gradient, __pyx_n_s_threshold, __pyx_n_s_xp, __pyx_n_s_grad_norm); if (unlikely(!__pyx_tuple__210)) __PYX_ERR(0, 455, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__210); __Pyx_GIVEREF(__pyx_tuple__210); __pyx_codeobj__57 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__210, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_clip_gradient, 455, __pyx_empty_bytes); if 
(unlikely(!__pyx_codeobj__57)) __PYX_ERR(0, 455, __pyx_L1_error) /* "thinc/neural/ops.pyx":461 * gradient *= threshold / grad_norm * * def logloss(self, y_true, y_pred): # <<<<<<<<<<<<<< * log_yp = self.xp.log(y_pred + 1e-8) * loss = (y_true * log_yp) + (1-y_true) * self.xp.log((1-y_pred)+1e-8) */ __pyx_tuple__211 = PyTuple_Pack(5, __pyx_n_s_self, __pyx_n_s_y_true, __pyx_n_s_y_pred, __pyx_n_s_log_yp, __pyx_n_s_loss); if (unlikely(!__pyx_tuple__211)) __PYX_ERR(0, 461, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__211); __Pyx_GIVEREF(__pyx_tuple__211); __pyx_codeobj__58 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__211, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_logloss, 461, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__58)) __PYX_ERR(0, 461, __pyx_L1_error) /* "thinc/neural/ops.pyx":471 * xp = numpy * * def asarray(self, data, dtype=None): # <<<<<<<<<<<<<< * if isinstance(data, self.xp.ndarray): * if dtype is not None: */ __pyx_tuple__212 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_data, __pyx_n_s_dtype); if (unlikely(!__pyx_tuple__212)) __PYX_ERR(0, 471, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__212); __Pyx_GIVEREF(__pyx_tuple__212); __pyx_codeobj__59 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__212, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_asarray, 471, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__59)) __PYX_ERR(0, 471, __pyx_L1_error) __pyx_tuple__213 = PyTuple_Pack(1, ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__213)) __PYX_ERR(0, 471, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__213); __Pyx_GIVEREF(__pyx_tuple__213); /* "thinc/neural/ops.pyx":488 * * * def allocate(self, shape, dtype='float32'): # <<<<<<<<<<<<<< * if isinstance(shape, integer_types): * shape = (shape,) */ __pyx_tuple__214 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_shape, __pyx_n_s_dtype); if (unlikely(!__pyx_tuple__214)) __PYX_ERR(0, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__214); __Pyx_GIVEREF(__pyx_tuple__214); __pyx_codeobj__60 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__214, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_allocate, 488, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__60)) __PYX_ERR(0, 488, __pyx_L1_error) __pyx_tuple__215 = PyTuple_Pack(1, ((PyObject*)__pyx_n_s_float32)); if (unlikely(!__pyx_tuple__215)) __PYX_ERR(0, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__215); __Pyx_GIVEREF(__pyx_tuple__215); /* "thinc/neural/ops.pyx":493 * return self.xp.zeros(shape, dtype=dtype) * * def inplace_add(self, np.ndarray x, np.ndarray y, float scale=1.0): # <<<<<<<<<<<<<< * VecVec.add_i(x.data, * y.data, scale, x.shape[0]) */ __pyx_tuple__216 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_scale); if (unlikely(!__pyx_tuple__216)) __PYX_ERR(0, 493, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__216); __Pyx_GIVEREF(__pyx_tuple__216); __pyx_codeobj__61 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__216, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_inplace_add, 493, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__61)) __PYX_ERR(0, 493, __pyx_L1_error) /* "thinc/neural/ops.pyx":497 * y.data, scale, x.shape[0]) * * def 
matmul(self, float[:, :, ::1] x, float[:, :, ::1] y, out=None): # <<<<<<<<<<<<<< * assert x.shape[0] == y.shape[0] * assert x.shape[2] == y.shape[1] */ __pyx_tuple__217 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_out, __pyx_n_s_out_array, __pyx_n_s_i); if (unlikely(!__pyx_tuple__217)) __PYX_ERR(0, 497, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__217); __Pyx_GIVEREF(__pyx_tuple__217); __pyx_codeobj__62 = (PyObject*)__Pyx_PyCode_New(4, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__217, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_matmul, 497, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__62)) __PYX_ERR(0, 497, __pyx_L1_error) __pyx_tuple__218 = PyTuple_Pack(1, ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__218)) __PYX_ERR(0, 497, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__218); __Pyx_GIVEREF(__pyx_tuple__218); /* "thinc/neural/ops.pyx":512 * return out_array * * def gemm(self, const float[:, ::1] x, const float[:, ::1] y, trans1=False, trans2=False, # <<<<<<<<<<<<<< * out=None): * cdef int m */ __pyx_tuple__219 = PyTuple_Pack(9, __pyx_n_s_self, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_trans1, __pyx_n_s_trans2, __pyx_n_s_out, __pyx_n_s_m, __pyx_n_s_n, __pyx_n_s_out_array); if (unlikely(!__pyx_tuple__219)) __PYX_ERR(0, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__219); __Pyx_GIVEREF(__pyx_tuple__219); __pyx_codeobj__63 = (PyObject*)__Pyx_PyCode_New(6, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__219, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_gemm, 512, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__63)) __PYX_ERR(0, 512, __pyx_L1_error) __pyx_tuple__220 = PyTuple_Pack(3, ((PyObject *)Py_False), ((PyObject *)Py_False), ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__220)) __PYX_ERR(0, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__220); __Pyx_GIVEREF(__pyx_tuple__220); /* "thinc/neural/ops.pyx":534 * return out_array * * def affine(self, weights, bias, signal): # <<<<<<<<<<<<<< * dotted = self.gemm(signal, weights, trans2=True) * dotted += bias */ __pyx_tuple__221 = PyTuple_Pack(5, __pyx_n_s_self, __pyx_n_s_weights, __pyx_n_s_bias, __pyx_n_s_signal, __pyx_n_s_dotted); if (unlikely(!__pyx_tuple__221)) __PYX_ERR(0, 534, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__221); __Pyx_GIVEREF(__pyx_tuple__221); __pyx_codeobj__64 = (PyObject*)__Pyx_PyCode_New(4, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__221, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_affine, 534, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__64)) __PYX_ERR(0, 534, __pyx_L1_error) /* "thinc/neural/ops.pyx":539 * return dotted * * def elu(self, ndarray X, inplace=True): # <<<<<<<<<<<<<< * cdef weight_t* data = X.data * cdef size_t size = X.size */ __pyx_tuple__222 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_inplace, __pyx_n_s_data, __pyx_n_s_size, __pyx_n_s_i); if (unlikely(!__pyx_tuple__222)) __PYX_ERR(0, 539, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__222); __Pyx_GIVEREF(__pyx_tuple__222); __pyx_codeobj__65 = (PyObject*)__Pyx_PyCode_New(3, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__222, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_elu, 539, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__65)) __PYX_ERR(0, 539, __pyx_L1_error) __pyx_tuple__223 = 
PyTuple_Pack(1, ((PyObject *)Py_True)); if (unlikely(!__pyx_tuple__223)) __PYX_ERR(0, 539, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__223); __Pyx_GIVEREF(__pyx_tuple__223); /* "thinc/neural/ops.pyx":546 * data[i] = expf(data[i])-1. * * def selu(self, ndarray X, inplace=True): # <<<<<<<<<<<<<< * cdef weight_t* data = X.data * cdef size_t size = X.size */ __pyx_tuple__224 = PyTuple_Pack(8, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_inplace, __pyx_n_s_data, __pyx_n_s_size, __pyx_n_s_scale, __pyx_n_s_alpha, __pyx_n_s_i); if (unlikely(!__pyx_tuple__224)) __PYX_ERR(0, 546, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__224); __Pyx_GIVEREF(__pyx_tuple__224); __pyx_codeobj__66 = (PyObject*)__Pyx_PyCode_New(3, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__224, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_selu, 546, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__66)) __PYX_ERR(0, 546, __pyx_L1_error) __pyx_tuple__225 = PyTuple_Pack(1, ((PyObject *)Py_True)); if (unlikely(!__pyx_tuple__225)) __PYX_ERR(0, 546, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__225); __Pyx_GIVEREF(__pyx_tuple__225); /* "thinc/neural/ops.pyx":556 * data[i] *= scale * * def backprop_selu(self, ndarray delta_, ndarray signal_in_, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the SELU transformation */ __pyx_tuple__226 = PyTuple_Pack(10, __pyx_n_s_self, __pyx_n_s_delta, __pyx_n_s_signal_in, __pyx_n_s_inplace, __pyx_n_s_size, __pyx_n_s_delta_2, __pyx_n_s_signal_in_2, __pyx_n_s_scale, __pyx_n_s_alpha, __pyx_n_s_i); if (unlikely(!__pyx_tuple__226)) __PYX_ERR(0, 556, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__226); __Pyx_GIVEREF(__pyx_tuple__226); __pyx_codeobj__67 = (PyObject*)__Pyx_PyCode_New(4, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__226, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_selu, 556, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__67)) __PYX_ERR(0, 556, __pyx_L1_error) __pyx_tuple__227 = PyTuple_Pack(1, ((PyObject *)Py_True)); if (unlikely(!__pyx_tuple__227)) __PYX_ERR(0, 556, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__227); __Pyx_GIVEREF(__pyx_tuple__227); /* "thinc/neural/ops.pyx":570 * delta[i] *= alpha * expf(signal_in[i]) * * def backprop_elu(self, ndarray delta_, ndarray signal_out_, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the ELU transformation */ __pyx_tuple__228 = PyTuple_Pack(8, __pyx_n_s_self, __pyx_n_s_delta, __pyx_n_s_signal_out, __pyx_n_s_inplace, __pyx_n_s_size, __pyx_n_s_delta_2, __pyx_n_s_signal_out_2, __pyx_n_s_i); if (unlikely(!__pyx_tuple__228)) __PYX_ERR(0, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__228); __Pyx_GIVEREF(__pyx_tuple__228); __pyx_codeobj__68 = (PyObject*)__Pyx_PyCode_New(4, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__228, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_elu, 570, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__68)) __PYX_ERR(0, 570, __pyx_L1_error) __pyx_tuple__229 = PyTuple_Pack(1, ((PyObject *)Py_True)); if (unlikely(!__pyx_tuple__229)) __PYX_ERR(0, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__229); __Pyx_GIVEREF(__pyx_tuple__229); /* "thinc/neural/ops.pyx":582 * delta[i] *= signal_out[i] + 1. 
* * def relu(self, ndarray X, inplace=False): # <<<<<<<<<<<<<< * cdef np.ndarray out = X if inplace else X.copy() * cdef weight_t* data = out.data */ __pyx_tuple__230 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_inplace, __pyx_n_s_out, __pyx_n_s_data, __pyx_n_s_size, __pyx_n_s_i); if (unlikely(!__pyx_tuple__230)) __PYX_ERR(0, 582, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__230); __Pyx_GIVEREF(__pyx_tuple__230); __pyx_codeobj__69 = (PyObject*)__Pyx_PyCode_New(3, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__230, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_relu, 582, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__69)) __PYX_ERR(0, 582, __pyx_L1_error) __pyx_tuple__231 = PyTuple_Pack(1, ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__231)) __PYX_ERR(0, 582, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__231); __Pyx_GIVEREF(__pyx_tuple__231); /* "thinc/neural/ops.pyx":591 * return out * * def backprop_relu(self, ndarray dY, ndarray Y, inplace=False): # <<<<<<<<<<<<<< * cdef np.ndarray dX = dY if inplace else dY.copy() * cdef size_t size = dX.size */ __pyx_tuple__232 = PyTuple_Pack(9, __pyx_n_s_self, __pyx_n_s_dY, __pyx_n_s_Y, __pyx_n_s_inplace, __pyx_n_s_dX, __pyx_n_s_size, __pyx_n_s_dX_ptr, __pyx_n_s_Y_ptr, __pyx_n_s_i); if (unlikely(!__pyx_tuple__232)) __PYX_ERR(0, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__232); __Pyx_GIVEREF(__pyx_tuple__232); __pyx_codeobj__70 = (PyObject*)__Pyx_PyCode_New(4, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__232, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_relu, 591, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__70)) __PYX_ERR(0, 591, __pyx_L1_error) __pyx_tuple__233 = PyTuple_Pack(1, ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__233)) __PYX_ERR(0, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__233); __Pyx_GIVEREF(__pyx_tuple__233); /* "thinc/neural/ops.pyx":601 * return dX * * def maxout(self, const float[:, :, ::1] py_cands): # <<<<<<<<<<<<<< * cdef Pool mem = Pool() * cdef int B = py_cands.shape[0] */ __pyx_tuple__234 = PyTuple_Pack(8, __pyx_n_s_self, __pyx_n_s_py_cands, __pyx_n_s_mem, __pyx_n_s_B, __pyx_n_s_O, __pyx_n_s_P, __pyx_n_s_best, __pyx_n_s_which); if (unlikely(!__pyx_tuple__234)) __PYX_ERR(0, 601, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__234); __Pyx_GIVEREF(__pyx_tuple__234); __pyx_codeobj__71 = (PyObject*)__Pyx_PyCode_New(2, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__234, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_maxout, 601, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__71)) __PYX_ERR(0, 601, __pyx_L1_error) /* "thinc/neural/ops.pyx":613 * return best, which * * def backprop_maxout(self, const float[:, ::1] dX__bo, int[:, ::1] which__bo, int P): # <<<<<<<<<<<<<< * cdef int B = dX__bo.shape[0] * cdef int O = dX__bo.shape[1] */ __pyx_tuple__235 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_dX__bo, __pyx_n_s_which__bo, __pyx_n_s_P, __pyx_n_s_B, __pyx_n_s_O, __pyx_n_s_dX__bop); if (unlikely(!__pyx_tuple__235)) __PYX_ERR(0, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__235); __Pyx_GIVEREF(__pyx_tuple__235); __pyx_codeobj__72 = (PyObject*)__Pyx_PyCode_New(4, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__235, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_maxout, 613, 
__pyx_empty_bytes); if (unlikely(!__pyx_codeobj__72)) __PYX_ERR(0, 613, __pyx_L1_error) /* "thinc/neural/ops.pyx":622 * return dX__bop * * def mish(self, const float[:, ::1] X, threshold=5, out=None): # <<<<<<<<<<<<<< * shape = [X.shape[i] for i in range(X.ndim)] * cdef np.ndarray Y = self.allocate(tuple(shape), dtype="f") */ __pyx_tuple__236 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_threshold, __pyx_n_s_out, __pyx_n_s_shape, __pyx_n_s_Y, __pyx_n_s_i); if (unlikely(!__pyx_tuple__236)) __PYX_ERR(0, 622, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__236); __Pyx_GIVEREF(__pyx_tuple__236); __pyx_codeobj__73 = (PyObject*)__Pyx_PyCode_New(4, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__236, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_mish, 622, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__73)) __PYX_ERR(0, 622, __pyx_L1_error) __pyx_tuple__237 = PyTuple_Pack(2, ((PyObject *)__pyx_int_5), ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__237)) __PYX_ERR(0, 622, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__237); __Pyx_GIVEREF(__pyx_tuple__237); /* "thinc/neural/ops.pyx":633 * return Y * * def backprop_mish(self, const float[:, ::1] dY, const float[:, ::1] X, # <<<<<<<<<<<<<< * threshold=5, out=None): * shape = [X.shape[i] for i in range(X.ndim)] */ __pyx_tuple__238 = PyTuple_Pack(8, __pyx_n_s_self, __pyx_n_s_dY, __pyx_n_s_X, __pyx_n_s_threshold, __pyx_n_s_out, __pyx_n_s_shape, __pyx_n_s_dX, __pyx_n_s_i); if (unlikely(!__pyx_tuple__238)) __PYX_ERR(0, 633, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__238); __Pyx_GIVEREF(__pyx_tuple__238); __pyx_codeobj__74 = (PyObject*)__Pyx_PyCode_New(5, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__238, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_mish, 633, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__74)) __PYX_ERR(0, 633, __pyx_L1_error) __pyx_tuple__239 = PyTuple_Pack(2, ((PyObject *)__pyx_int_5), ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__239)) __PYX_ERR(0, 633, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__239); __Pyx_GIVEREF(__pyx_tuple__239); /* "thinc/neural/ops.pyx":658 * # cells.shape[0], cells.shape[1]) * * def seq2col(self, const float[:, ::1] seq, int nW): # <<<<<<<<<<<<<< * '''Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence. 
* The new sequence is constructed by concatenating nW preceding and succeeding */ __pyx_tuple__240 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_seq, __pyx_n_s_nW, __pyx_n_s_B, __pyx_n_s_I, __pyx_n_s_cols); if (unlikely(!__pyx_tuple__240)) __PYX_ERR(0, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__240); __Pyx_GIVEREF(__pyx_tuple__240); __pyx_codeobj__75 = (PyObject*)__Pyx_PyCode_New(3, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__240, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_seq2col, 658, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__75)) __PYX_ERR(0, 658, __pyx_L1_error) /* "thinc/neural/ops.pyx":669 * return cols * * def backprop_seq2col(self, const float[:, ::1] dY, int nW): # <<<<<<<<<<<<<< * cdef int B = dY.shape[0] * cdef int nF = nW*2+1 */ __pyx_tuple__241 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_dY, __pyx_n_s_nW, __pyx_n_s_B, __pyx_n_s_nF, __pyx_n_s_I, __pyx_n_s_dX); if (unlikely(!__pyx_tuple__241)) __PYX_ERR(0, 669, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__241); __Pyx_GIVEREF(__pyx_tuple__241); __pyx_codeobj__76 = (PyObject*)__Pyx_PyCode_New(3, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__241, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_seq2col, 669, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__76)) __PYX_ERR(0, 669, __pyx_L1_error) /* "thinc/neural/ops.pyx":677 * return dX * * def remap_ids(self, PreshMap mapping, uint64_t[::1] ids_mv, uint64_t value=0): # <<<<<<<<<<<<<< * cdef uint64_t* ids = &ids_mv[0] * cdef ndarray[uint64_t] output_arr = self.allocate(len(ids_mv), dtype='uint64') */ __pyx_tuple__242 = PyTuple_Pack(10, __pyx_n_s_self, __pyx_n_s_mapping, __pyx_n_s_ids_mv, __pyx_n_s_value, __pyx_n_s_ids, __pyx_n_s_output_arr, __pyx_n_s_output, __pyx_n_s_key, __pyx_n_s_i, __pyx_n_s_mapped); if (unlikely(!__pyx_tuple__242)) __PYX_ERR(0, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__242); __Pyx_GIVEREF(__pyx_tuple__242); __pyx_codeobj__77 = (PyObject*)__Pyx_PyCode_New(4, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__242, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_remap_ids, 677, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__77)) __PYX_ERR(0, 677, __pyx_L1_error) /* "thinc/neural/ops.pyx":696 * return output_arr * * def increment_slices(self, ndarray contig_array, ndarray _to_add, _starts): # <<<<<<<<<<<<<< * cdef ndarray contig_to_add = self.xp.ascontiguousarray(_to_add, dtype='float32') * cdef ndarray contig_starts = self.xp.ascontiguousarray(_starts, dtype='int32') */ __pyx_tuple__243 = PyTuple_Pack(15, __pyx_n_s_self, __pyx_n_s_contig_array, __pyx_n_s_to_add, __pyx_n_s_starts, __pyx_n_s_contig_to_add, __pyx_n_s_contig_starts, __pyx_n_s_to_add_2, __pyx_n_s_whole_array, __pyx_n_s_starts_2, __pyx_n_s_n_slice, __pyx_n_s_length, __pyx_n_s_stride, __pyx_n_s_start, __pyx_n_s_workon, __pyx_n_s_i); if (unlikely(!__pyx_tuple__243)) __PYX_ERR(0, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__243); __Pyx_GIVEREF(__pyx_tuple__243); __pyx_codeobj__78 = (PyObject*)__Pyx_PyCode_New(4, 0, 15, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__243, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_increment_slices, 696, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__78)) __PYX_ERR(0, 696, __pyx_L1_error) /* "thinc/neural/ops.pyx":713 * 
@cython.boundscheck(False) * @cython.wraparound(False) * def hash(self, const uint64_t[::1] ids, uint32_t seed): # <<<<<<<<<<<<<< * '''Hash a sequence of 64-bit keys into a table with 4 32-bit keys''' * # Written to mirror the GPU implementation */ __pyx_tuple__244 = PyTuple_Pack(11, __pyx_n_s_self, __pyx_n_s_ids, __pyx_n_s_seed, __pyx_n_s_keys_2, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_entropy, __pyx_n_s_n_items, __pyx_n_s_in_size, __pyx_n_s_src, __pyx_n_s_dest); if (unlikely(!__pyx_tuple__244)) __PYX_ERR(0, 713, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__244); __Pyx_GIVEREF(__pyx_tuple__244); __pyx_codeobj__79 = (PyObject*)__Pyx_PyCode_New(3, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__244, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_hash, 713, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__79)) __PYX_ERR(0, 713, __pyx_L1_error) /* "thinc/neural/ops.pyx":731 * return keys * * def mean_pool(self, const float[:, ::1] X, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] */ __pyx_tuple__245 = PyTuple_Pack(8, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_lengths, __pyx_n_s_B, __pyx_n_s_O, __pyx_n_s_T, __pyx_n_s_mem, __pyx_n_s_means); if (unlikely(!__pyx_tuple__245)) __PYX_ERR(0, 731, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__245); __Pyx_GIVEREF(__pyx_tuple__245); __pyx_codeobj__80 = (PyObject*)__Pyx_PyCode_New(3, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__245, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_mean_pool, 731, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__80)) __PYX_ERR(0, 731, __pyx_L1_error) /* "thinc/neural/ops.pyx":743 * return cpu_floats_ptr2array(means, (B, O)) * * def sum_pool(self, const float[:, ::1] X, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] */ __pyx_tuple__246 = PyTuple_Pack(8, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_lengths, __pyx_n_s_B, __pyx_n_s_O, __pyx_n_s_T, __pyx_n_s_mem, __pyx_n_s_sums); if (unlikely(!__pyx_tuple__246)) __PYX_ERR(0, 743, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__246); __Pyx_GIVEREF(__pyx_tuple__246); __pyx_codeobj__81 = (PyObject*)__Pyx_PyCode_New(3, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__246, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_sum_pool, 743, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__81)) __PYX_ERR(0, 743, __pyx_L1_error) /* "thinc/neural/ops.pyx":755 * return cpu_floats_ptr2array(sums, (B, O)) * * def backprop_mean_pool(self, const float[:, ::1] d_means, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = d_means.shape[1] */ __pyx_tuple__247 = PyTuple_Pack(9, __pyx_n_s_self, __pyx_n_s_d_means, __pyx_n_s_lengths, __pyx_n_s_B, __pyx_n_s_O, __pyx_n_s_T, __pyx_n_s_length, __pyx_n_s_mem, __pyx_n_s_dX); if (unlikely(!__pyx_tuple__247)) __PYX_ERR(0, 755, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__247); __Pyx_GIVEREF(__pyx_tuple__247); __pyx_codeobj__82 = (PyObject*)__Pyx_PyCode_New(3, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__247, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_mean_pool, 755, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__82)) __PYX_ERR(0, 755, __pyx_L1_error) /* "thinc/neural/ops.pyx":769 * return cpu_floats_ptr2array(dX, (T, O)) * * def 
backprop_sum_pool(self, const float[:, ::1] d_sums, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = d_sums.shape[1] */ __pyx_tuple__248 = PyTuple_Pack(9, __pyx_n_s_self, __pyx_n_s_d_sums, __pyx_n_s_lengths, __pyx_n_s_B, __pyx_n_s_O, __pyx_n_s_T, __pyx_n_s_length, __pyx_n_s_mem, __pyx_n_s_dX); if (unlikely(!__pyx_tuple__248)) __PYX_ERR(0, 769, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__248); __Pyx_GIVEREF(__pyx_tuple__248); __pyx_codeobj__83 = (PyObject*)__Pyx_PyCode_New(3, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__248, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_sum_pool, 769, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__83)) __PYX_ERR(0, 769, __pyx_L1_error) /* "thinc/neural/ops.pyx":783 * * * def max_pool(self, const float[:, ::1] X, const int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] */ __pyx_tuple__249 = PyTuple_Pack(11, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_lengths, __pyx_n_s_B, __pyx_n_s_O, __pyx_n_s_T, __pyx_n_s_mem, __pyx_n_s_maxes, __pyx_n_s_which, __pyx_n_s_py_best, __pyx_n_s_py_which); if (unlikely(!__pyx_tuple__249)) __PYX_ERR(0, 783, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__249); __Pyx_GIVEREF(__pyx_tuple__249); __pyx_codeobj__84 = (PyObject*)__Pyx_PyCode_New(3, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__249, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_max_pool, 783, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__84)) __PYX_ERR(0, 783, __pyx_L1_error) /* "thinc/neural/ops.pyx":799 * return py_best, py_which * * def backprop_max_pool(self, const float[:, ::1] d_maxes, # <<<<<<<<<<<<<< * const int[:, ::1] which, const int[::1] lengths): * cdef int B = lengths.shape[0] */ __pyx_tuple__250 = PyTuple_Pack(10, __pyx_n_s_self, __pyx_n_s_d_maxes, __pyx_n_s_which, __pyx_n_s_lengths, __pyx_n_s_B, __pyx_n_s_O, __pyx_n_s_T, __pyx_n_s_length, __pyx_n_s_mem, __pyx_n_s_dX); if (unlikely(!__pyx_tuple__250)) __PYX_ERR(0, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__250); __Pyx_GIVEREF(__pyx_tuple__250); __pyx_codeobj__85 = (PyObject*)__Pyx_PyCode_New(4, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__250, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_max_pool, 799, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__85)) __PYX_ERR(0, 799, __pyx_L1_error) /* "thinc/neural/ops.pyx":814 * return cpu_floats_ptr2array(dX, (T, O)) * * def add_sum(self, np.ndarray out, np.ndarray to_sum): # <<<<<<<<<<<<<< * VecVec.batch_add_i(out.data, * to_sum.data, 1., to_sum.shape[1], to_sum.shape[0]) */ __pyx_tuple__251 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_out, __pyx_n_s_to_sum); if (unlikely(!__pyx_tuple__251)) __PYX_ERR(0, 814, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__251); __Pyx_GIVEREF(__pyx_tuple__251); __pyx_codeobj__86 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__251, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_add_sum, 814, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__86)) __PYX_ERR(0, 814, __pyx_L1_error) /* "thinc/neural/ops.pyx":818 * to_sum.data, 1., to_sum.shape[1], to_sum.shape[0]) * * def scatter_add(self, np.ndarray out, np.ndarray ids, np.ndarray inputs): # <<<<<<<<<<<<<< * if out.dtype == 'float32' \ * and 
ids.dtype == 'int32' \ */ __pyx_tuple__252 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_out, __pyx_n_s_ids, __pyx_n_s_inputs); if (unlikely(!__pyx_tuple__252)) __PYX_ERR(0, 818, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__252); __Pyx_GIVEREF(__pyx_tuple__252); __pyx_codeobj__87 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__252, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_scatter_add, 818, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__87)) __PYX_ERR(0, 818, __pyx_L1_error) /* "thinc/neural/ops.pyx":838 * @cython.boundscheck(False) * @cython.wraparound(False) * def adam(self, float[::1] weights, float[::1] gradient, float[::1] mom1, # <<<<<<<<<<<<<< * float[::1] mom2, const float beta1, const float beta2, float eps, * float learn_rate, float mod_rate=1.): */ __pyx_tuple__253 = PyTuple_Pack(10, __pyx_n_s_self, __pyx_n_s_weights, __pyx_n_s_gradient, __pyx_n_s_mom1, __pyx_n_s_mom2, __pyx_n_s_beta1, __pyx_n_s_beta2, __pyx_n_s_eps, __pyx_n_s_learn_rate, __pyx_n_s_mod_rate); if (unlikely(!__pyx_tuple__253)) __PYX_ERR(0, 838, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__253); __Pyx_GIVEREF(__pyx_tuple__253); __pyx_codeobj__88 = (PyObject*)__Pyx_PyCode_New(10, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__253, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_adam, 838, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__88)) __PYX_ERR(0, 838, __pyx_L1_error) /* "thinc/neural/ops.pyx":847 * memset(&gradient[0], 0, gradient.size * sizeof(float)) * * def ngrams(self, int n, const uint64_t[::1] keys_): # <<<<<<<<<<<<<< * keys = &keys_[0] * length = max(0, keys_.shape[0]-n) */ __pyx_tuple__254 = PyTuple_Pack(8, __pyx_n_s_self, __pyx_n_s_n, __pyx_n_s_keys, __pyx_n_s_keys_2, __pyx_n_s_length, __pyx_n_s_output_2, __pyx_n_s_output, __pyx_n_s_i); if (unlikely(!__pyx_tuple__254)) __PYX_ERR(0, 847, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__254); __Pyx_GIVEREF(__pyx_tuple__254); __pyx_codeobj__89 = (PyObject*)__Pyx_PyCode_New(3, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__254, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_ngrams, 847, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__89)) __PYX_ERR(0, 847, __pyx_L1_error) /* "thinc/neural/ops.pyx":856 * return output_ * * def position_encode(self, int N, int D, int period=10000, out=None): # <<<<<<<<<<<<<< * cdef np.ndarray out_ * if out is None: */ __pyx_tuple__255 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_N, __pyx_n_s_D, __pyx_n_s_period, __pyx_n_s_out, __pyx_n_s_out_2); if (unlikely(!__pyx_tuple__255)) __PYX_ERR(0, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__255); __Pyx_GIVEREF(__pyx_tuple__255); __pyx_codeobj__90 = (PyObject*)__Pyx_PyCode_New(5, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__255, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_position_encode, 856, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__90)) __PYX_ERR(0, 856, __pyx_L1_error) /* "thinc/neural/ops.pyx":977 * xp = cupy * * def matmul(self, x, y, out=None): # <<<<<<<<<<<<<< * return self.xp.matmul(x, y, out=out) * */ __pyx_tuple__256 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_out); if (unlikely(!__pyx_tuple__256)) __PYX_ERR(0, 977, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__256); 
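/* Reference note for the adam() code objects registered above (ops.pyx lines
 * 444 and 838; a CuPy variant follows at line 1089): the parameters are
 * (weights, gradient, mom1, mom2, beta1, beta2, eps, learn_rate, mod_rate=1.).
 * A sketch of the standard Adam step these methods are expected to follow;
 * this is an illustration, not a transcription of ops.pyx:
 *
 *     mom1 = beta1 * mom1 + (1 - beta1) * gradient
 *     mom2 = beta2 * mom2 + (1 - beta2) * gradient ** 2
 *     weights -= learn_rate * mom1 / (sqrt(mom2) + eps)
 *
 * after which the gradient buffer is cleared, as in the quoted
 * `gradient.fill(0)` and `memset(&gradient[0], 0, ...)` lines. */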
__Pyx_GIVEREF(__pyx_tuple__256); __pyx_codeobj__91 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__256, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_matmul, 977, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__91)) __PYX_ERR(0, 977, __pyx_L1_error) __pyx_tuple__257 = PyTuple_Pack(1, ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__257)) __PYX_ERR(0, 977, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__257); __Pyx_GIVEREF(__pyx_tuple__257); /* "thinc/neural/ops.pyx":980 * return self.xp.matmul(x, y, out=out) * * def gemm(self, x, y, out=None, trans1=False, trans2=False): # <<<<<<<<<<<<<< * if trans1: * x = x.T */ __pyx_tuple__258 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_out, __pyx_n_s_trans1, __pyx_n_s_trans2); if (unlikely(!__pyx_tuple__258)) __PYX_ERR(0, 980, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__258); __Pyx_GIVEREF(__pyx_tuple__258); __pyx_codeobj__92 = (PyObject*)__Pyx_PyCode_New(6, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__258, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_gemm, 980, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__92)) __PYX_ERR(0, 980, __pyx_L1_error) __pyx_tuple__259 = PyTuple_Pack(3, ((PyObject *)Py_None), ((PyObject *)Py_False), ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__259)) __PYX_ERR(0, 980, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__259); __Pyx_GIVEREF(__pyx_tuple__259); /* "thinc/neural/ops.pyx":991 * return out * * def asarray(self, X, dtype=None): # <<<<<<<<<<<<<< * if isinstance(X, cupy.ndarray): * return self.xp.asarray(X, dtype=dtype) */ __pyx_tuple__260 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_dtype, __pyx_n_s_pointer, __pyx_n_s_shape, __pyx_n_s_array); if (unlikely(!__pyx_tuple__260)) __PYX_ERR(0, 991, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__260); __Pyx_GIVEREF(__pyx_tuple__260); __pyx_codeobj__93 = (PyObject*)__Pyx_PyCode_New(3, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__260, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_asarray, 991, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__93)) __PYX_ERR(0, 991, __pyx_L1_error) __pyx_tuple__261 = PyTuple_Pack(1, ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__261)) __PYX_ERR(0, 991, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__261); __Pyx_GIVEREF(__pyx_tuple__261); /* "thinc/neural/ops.pyx":1003 * return self.xp.array(X, dtype=dtype) * * def maxout(self, X): # <<<<<<<<<<<<<< * return _custom_kernels.maxout(X) * */ __pyx_tuple__262 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_X); if (unlikely(!__pyx_tuple__262)) __PYX_ERR(0, 1003, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__262); __Pyx_GIVEREF(__pyx_tuple__262); __pyx_codeobj__94 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__262, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_maxout, 1003, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__94)) __PYX_ERR(0, 1003, __pyx_L1_error) /* "thinc/neural/ops.pyx":1006 * return _custom_kernels.maxout(X) * * def backprop_maxout(self, dY, which, int P): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_maxout(dY, which, P) * */ __pyx_tuple__263 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_dY, __pyx_n_s_which, __pyx_n_s_P); if (unlikely(!__pyx_tuple__263)) __PYX_ERR(0, 1006, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__263); __Pyx_GIVEREF(__pyx_tuple__263); __pyx_codeobj__95 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__263, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_maxout, 1006, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__95)) __PYX_ERR(0, 1006, __pyx_L1_error) /* "thinc/neural/ops.pyx":1009 * return _custom_kernels.backprop_maxout(dY, which, P) * * def relu(self, X, inplace=False): # <<<<<<<<<<<<<< * if not inplace: * return X * (X > 0) */ __pyx_tuple__264 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_inplace); if (unlikely(!__pyx_tuple__264)) __PYX_ERR(0, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__264); __Pyx_GIVEREF(__pyx_tuple__264); __pyx_codeobj__96 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__264, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_relu, 1009, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__96)) __PYX_ERR(0, 1009, __pyx_L1_error) __pyx_tuple__265 = PyTuple_Pack(1, ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__265)) __PYX_ERR(0, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__265); __Pyx_GIVEREF(__pyx_tuple__265); /* "thinc/neural/ops.pyx":1016 * return X * * def backprop_relu(self, delta_, signal_out, inplace=False): # <<<<<<<<<<<<<< * if not inplace: * return delta_ * (signal_out > 0) */ __pyx_tuple__266 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_delta, __pyx_n_s_signal_out_2, __pyx_n_s_inplace); if (unlikely(!__pyx_tuple__266)) __PYX_ERR(0, 1016, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__266); __Pyx_GIVEREF(__pyx_tuple__266); __pyx_codeobj__97 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__266, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_relu, 1016, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__97)) __PYX_ERR(0, 1016, __pyx_L1_error) __pyx_tuple__267 = PyTuple_Pack(1, ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__267)) __PYX_ERR(0, 1016, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__267); __Pyx_GIVEREF(__pyx_tuple__267); /* "thinc/neural/ops.pyx":1022 * return delta_ * * def selu(self, X, inplace=True): # <<<<<<<<<<<<<< * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 */ __pyx_tuple__268 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_inplace, __pyx_n_s_scale, __pyx_n_s_alpha, __pyx_n_s_out); if (unlikely(!__pyx_tuple__268)) __PYX_ERR(0, 1022, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__268); __Pyx_GIVEREF(__pyx_tuple__268); __pyx_codeobj__98 = (PyObject*)__Pyx_PyCode_New(3, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__268, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_selu, 1022, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__98)) __PYX_ERR(0, 1022, __pyx_L1_error) __pyx_tuple__269 = PyTuple_Pack(1, ((PyObject *)Py_True)); if (unlikely(!__pyx_tuple__269)) __PYX_ERR(0, 1022, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__269); __Pyx_GIVEREF(__pyx_tuple__269); /* "thinc/neural/ops.pyx":1030 * return out * * def backprop_selu(self, delta, signal_in, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the SELU transformation */ __pyx_tuple__270 = PyTuple_Pack(7, __pyx_n_s_self, __pyx_n_s_delta_2, __pyx_n_s_signal_in_2, __pyx_n_s_inplace, 
__pyx_n_s_scale, __pyx_n_s_alpha, __pyx_n_s_out); if (unlikely(!__pyx_tuple__270)) __PYX_ERR(0, 1030, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__270); __Pyx_GIVEREF(__pyx_tuple__270); __pyx_codeobj__99 = (PyObject*)__Pyx_PyCode_New(4, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__270, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_selu, 1030, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__99)) __PYX_ERR(0, 1030, __pyx_L1_error) __pyx_tuple__271 = PyTuple_Pack(1, ((PyObject *)Py_True)); if (unlikely(!__pyx_tuple__271)) __PYX_ERR(0, 1030, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__271); __Pyx_GIVEREF(__pyx_tuple__271); /* "thinc/neural/ops.pyx":1041 * return out * * def mish(self, X, threshold=5, out=None): # <<<<<<<<<<<<<< * return _custom_kernels.mish(X, threshold=threshold, out=out) * */ __pyx_tuple__272 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_threshold, __pyx_n_s_out); if (unlikely(!__pyx_tuple__272)) __PYX_ERR(0, 1041, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__272); __Pyx_GIVEREF(__pyx_tuple__272); __pyx_codeobj__100 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__272, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_mish, 1041, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__100)) __PYX_ERR(0, 1041, __pyx_L1_error) __pyx_tuple__273 = PyTuple_Pack(2, ((PyObject *)__pyx_int_5), ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__273)) __PYX_ERR(0, 1041, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__273); __Pyx_GIVEREF(__pyx_tuple__273); /* "thinc/neural/ops.pyx":1044 * return _custom_kernels.mish(X, threshold=threshold, out=out) * * def backprop_mish(self, dY, X, threshold=5, out=None): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_mish(dY, X, threshold=threshold, out=out) * */ __pyx_tuple__274 = PyTuple_Pack(5, __pyx_n_s_self, __pyx_n_s_dY, __pyx_n_s_X, __pyx_n_s_threshold, __pyx_n_s_out); if (unlikely(!__pyx_tuple__274)) __PYX_ERR(0, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__274); __Pyx_GIVEREF(__pyx_tuple__274); __pyx_codeobj__101 = (PyObject*)__Pyx_PyCode_New(5, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__274, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_mish, 1044, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__101)) __PYX_ERR(0, 1044, __pyx_L1_error) __pyx_tuple__275 = PyTuple_Pack(2, ((PyObject *)__pyx_int_5), ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__275)) __PYX_ERR(0, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__275); __Pyx_GIVEREF(__pyx_tuple__275); /* "thinc/neural/ops.pyx":1047 * return _custom_kernels.backprop_mish(dY, X, threshold=threshold, out=out) * * def clip_gradient(self, gradient, threshold): # <<<<<<<<<<<<<< * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) */ __pyx_tuple__276 = PyTuple_Pack(5, __pyx_n_s_self, __pyx_n_s_gradient, __pyx_n_s_threshold, __pyx_n_s_xp, __pyx_n_s_grad_norm); if (unlikely(!__pyx_tuple__276)) __PYX_ERR(0, 1047, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__276); __Pyx_GIVEREF(__pyx_tuple__276); __pyx_codeobj__102 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__276, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_clip_gradient, 1047, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__102)) 
__PYX_ERR(0, 1047, __pyx_L1_error) /* "thinc/neural/ops.pyx":1053 * gradient *= threshold / grad_norm * * def seq2col(self, seq, int nW): # <<<<<<<<<<<<<< * '''Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence. * The new sequence is constructed by concatenating nW preceding and succeeding */ __pyx_tuple__277 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_seq, __pyx_n_s_nW); if (unlikely(!__pyx_tuple__277)) __PYX_ERR(0, 1053, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__277); __Pyx_GIVEREF(__pyx_tuple__277); __pyx_codeobj__103 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__277, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_seq2col, 1053, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__103)) __PYX_ERR(0, 1053, __pyx_L1_error) /* "thinc/neural/ops.pyx":1060 * return _custom_kernels.seq2col(seq, nW) * * def backprop_seq2col(self, dY, int nW): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_seq2col(dY, nW) * */ __pyx_tuple__278 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_dY, __pyx_n_s_nW); if (unlikely(!__pyx_tuple__278)) __PYX_ERR(0, 1060, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__278); __Pyx_GIVEREF(__pyx_tuple__278); __pyx_codeobj__104 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__278, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_seq2col, 1060, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__104)) __PYX_ERR(0, 1060, __pyx_L1_error) /* "thinc/neural/ops.pyx":1063 * return _custom_kernels.backprop_seq2col(dY, nW) * * def mean_pool(self, X, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.mean_pool(X, lengths) * */ __pyx_tuple__279 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_lengths); if (unlikely(!__pyx_tuple__279)) __PYX_ERR(0, 1063, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__279); __Pyx_GIVEREF(__pyx_tuple__279); __pyx_codeobj__105 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__279, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_mean_pool, 1063, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__105)) __PYX_ERR(0, 1063, __pyx_L1_error) /* "thinc/neural/ops.pyx":1066 * return _custom_kernels.mean_pool(X, lengths) * * def backprop_mean_pool(self, d_means, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_mean_pool(d_means, lengths) * */ __pyx_tuple__280 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_d_means, __pyx_n_s_lengths); if (unlikely(!__pyx_tuple__280)) __PYX_ERR(0, 1066, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__280); __Pyx_GIVEREF(__pyx_tuple__280); __pyx_codeobj__106 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__280, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_mean_pool, 1066, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__106)) __PYX_ERR(0, 1066, __pyx_L1_error) /* "thinc/neural/ops.pyx":1069 * return _custom_kernels.backprop_mean_pool(d_means, lengths) * * def max_pool(self, X, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.max_pool(X, lengths) * */ __pyx_tuple__281 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_lengths); if (unlikely(!__pyx_tuple__281)) __PYX_ERR(0, 1069, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__281); __Pyx_GIVEREF(__pyx_tuple__281); 
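/* The CupyOps pooling methods registered around here (ops.pyx lines 1063-1078)
 * mirror the NumpyOps versions above (lines 731-799): each reduces a (T, O)
 * matrix of row vectors to one (B, O) row per sequence, with `lengths` giving
 * how many of the T rows belong to each of the B sequences. A hypothetical
 * Python usage sketch (assumes thinc 7.x; the variable names below are
 * illustrative and not taken from ops.pyx):
 *
 *     import numpy
 *     from thinc.neural.ops import NumpyOps
 *
 *     ops = NumpyOps()
 *     X = numpy.random.uniform(size=(10, 4)).astype("float32")  # 10 rows, 4 features
 *     lengths = numpy.asarray([3, 7], dtype="int32")            # two sequences: 3 + 7 rows
 *     best, which = ops.max_pool(X, lengths)                    # (2, 4) maxima and argmax rows
 *     dX = ops.backprop_max_pool(numpy.ones_like(best), which, lengths)  # back to (10, 4)
 */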
__pyx_codeobj__107 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__281, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_max_pool, 1069, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__107)) __PYX_ERR(0, 1069, __pyx_L1_error) /* "thinc/neural/ops.pyx":1072 * return _custom_kernels.max_pool(X, lengths) * * def backprop_max_pool(self, d_maxes, which, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_max_pool(d_maxes, which, lengths) * */ __pyx_tuple__282 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_d_maxes, __pyx_n_s_which, __pyx_n_s_lengths); if (unlikely(!__pyx_tuple__282)) __PYX_ERR(0, 1072, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__282); __Pyx_GIVEREF(__pyx_tuple__282); __pyx_codeobj__108 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__282, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_max_pool, 1072, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__108)) __PYX_ERR(0, 1072, __pyx_L1_error) /* "thinc/neural/ops.pyx":1075 * return _custom_kernels.backprop_max_pool(d_maxes, which, lengths) * * def sum_pool(self, X, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.sum_pool(X, lengths) * */ __pyx_tuple__283 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_X, __pyx_n_s_lengths); if (unlikely(!__pyx_tuple__283)) __PYX_ERR(0, 1075, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__283); __Pyx_GIVEREF(__pyx_tuple__283); __pyx_codeobj__109 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__283, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_sum_pool, 1075, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__109)) __PYX_ERR(0, 1075, __pyx_L1_error) /* "thinc/neural/ops.pyx":1078 * return _custom_kernels.sum_pool(X, lengths) * * def backprop_sum_pool(self, d_sums, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_sum_pool(d_sums, lengths) * */ __pyx_tuple__284 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_d_sums, __pyx_n_s_lengths); if (unlikely(!__pyx_tuple__284)) __PYX_ERR(0, 1078, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__284); __Pyx_GIVEREF(__pyx_tuple__284); __pyx_codeobj__110 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__284, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_backprop_sum_pool, 1078, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__110)) __PYX_ERR(0, 1078, __pyx_L1_error) /* "thinc/neural/ops.pyx":1083 * @cython.boundscheck(False) * @cython.wraparound(False) * def hash(self, ids, uint64_t seed): # <<<<<<<<<<<<<< * return _custom_kernels.hash(ids, seed) * */ __pyx_tuple__285 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_ids, __pyx_n_s_seed); if (unlikely(!__pyx_tuple__285)) __PYX_ERR(0, 1083, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__285); __Pyx_GIVEREF(__pyx_tuple__285); __pyx_codeobj__111 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__285, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_hash, 1083, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__111)) __PYX_ERR(0, 1083, __pyx_L1_error) /* "thinc/neural/ops.pyx":1086 * return _custom_kernels.hash(ids, seed) * * def scatter_add(self, out, ids, inputs): # <<<<<<<<<<<<<< 
* self.xp.scatter_add(out, ids, inputs) * */ __pyx_tuple__286 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_out, __pyx_n_s_ids, __pyx_n_s_inputs); if (unlikely(!__pyx_tuple__286)) __PYX_ERR(0, 1086, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__286); __Pyx_GIVEREF(__pyx_tuple__286); __pyx_codeobj__112 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__286, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_scatter_add, 1086, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__112)) __PYX_ERR(0, 1086, __pyx_L1_error) /* "thinc/neural/ops.pyx":1089 * self.xp.scatter_add(out, ids, inputs) * * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, # <<<<<<<<<<<<<< * learn_rate, mod_rate=1.): * cupy.ElementwiseKernel( */ __pyx_tuple__287 = PyTuple_Pack(10, __pyx_n_s_self, __pyx_n_s_weights, __pyx_n_s_gradient, __pyx_n_s_mom1, __pyx_n_s_mom2, __pyx_n_s_beta1, __pyx_n_s_beta2, __pyx_n_s_eps, __pyx_n_s_learn_rate, __pyx_n_s_mod_rate); if (unlikely(!__pyx_tuple__287)) __PYX_ERR(0, 1089, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__287); __Pyx_GIVEREF(__pyx_tuple__287); __pyx_codeobj__113 = (PyObject*)__Pyx_PyCode_New(10, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__287, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_adam, 1089, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__113)) __PYX_ERR(0, 1089, __pyx_L1_error) __pyx_tuple__288 = PyTuple_Pack(1, ((PyObject*)__pyx_float_1_)); if (unlikely(!__pyx_tuple__288)) __PYX_ERR(0, 1089, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__288); __Pyx_GIVEREF(__pyx_tuple__288); /* "thinc/neural/ops.pyx":1101 * gradient.fill(0) * * def normal_init(self, W, fan_in, inplace=True): # <<<<<<<<<<<<<< * scale = self.xp.sqrt(1. 
/ fan_in) * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) */ __pyx_tuple__289 = PyTuple_Pack(6, __pyx_n_s_self, __pyx_n_s_W, __pyx_n_s_fan_in, __pyx_n_s_inplace, __pyx_n_s_scale, __pyx_n_s_inits); if (unlikely(!__pyx_tuple__289)) __PYX_ERR(0, 1101, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__289); __Pyx_GIVEREF(__pyx_tuple__289); __pyx_codeobj__115 = (PyObject*)__Pyx_PyCode_New(4, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__289, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_normal_init, 1101, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__115)) __PYX_ERR(0, 1101, __pyx_L1_error) __pyx_tuple__290 = PyTuple_Pack(1, ((PyObject *)Py_True)); if (unlikely(!__pyx_tuple__290)) __PYX_ERR(0, 1101, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__290); __Pyx_GIVEREF(__pyx_tuple__290); /* "thinc/neural/ops.pyx":1111 * return inits * * def position_encode(self, *args, **kwargs): # <<<<<<<<<<<<<< * positions = NumpyOps().position_encode(*args, **kwargs) * return self.asarray(positions) */ __pyx_tuple__291 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_args, __pyx_n_s_kwargs, __pyx_n_s_positions); if (unlikely(!__pyx_tuple__291)) __PYX_ERR(0, 1111, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__291); __Pyx_GIVEREF(__pyx_tuple__291); __pyx_codeobj__116 = (PyObject*)__Pyx_PyCode_New(1, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARARGS|CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__291, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_position_encode, 1111, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__116)) __PYX_ERR(0, 1111, __pyx_L1_error) /* "thinc/neural/ops.pyx":1204 * * * def cpu_clip_gradient(weight_t[::1] gradient, weight_t threshold): # <<<<<<<<<<<<<< * grad_norm = Vec.norm(&gradient[0], gradient.shape[0]) * if grad_norm >= threshold: */ __pyx_tuple__292 = PyTuple_Pack(3, __pyx_n_s_gradient, __pyx_n_s_threshold, __pyx_n_s_grad_norm); if (unlikely(!__pyx_tuple__292)) __PYX_ERR(0, 1204, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__292); __Pyx_GIVEREF(__pyx_tuple__292); __pyx_codeobj__117 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__292, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_cpu_clip_gradient, 1204, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__117)) __PYX_ERR(0, 1204, __pyx_L1_error) /* "thinc/neural/ops.pyx":1210 * * * def add_gradient_noise(float[::1] gradient, weight_t noise_level, # <<<<<<<<<<<<<< * weight_t timestep): * cdef weight_t variance = noise_level / ((1 + timestep) ** 0.55) */ __pyx_tuple__293 = PyTuple_Pack(4, __pyx_n_s_gradient, __pyx_n_s_noise_level, __pyx_n_s_timestep, __pyx_n_s_variance); if (unlikely(!__pyx_tuple__293)) __PYX_ERR(0, 1210, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__293); __Pyx_GIVEREF(__pyx_tuple__293); __pyx_codeobj__118 = (PyObject*)__Pyx_PyCode_New(3, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__293, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_ops_pyx, __pyx_n_s_add_gradient_noise, 1210, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__118)) __PYX_ERR(0, 1210, __pyx_L1_error) /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("") # <<<<<<<<<<<<<< * cdef strided = Enum("") # default * cdef indirect = Enum("") */ __pyx_tuple__294 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if 
(unlikely(!__pyx_tuple__294)) __PYX_ERR(3, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__294); __Pyx_GIVEREF(__pyx_tuple__294); /* "View.MemoryView":287 * * cdef generic = Enum("") * cdef strided = Enum("") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("") * */ __pyx_tuple__295 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__295)) __PYX_ERR(3, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__295); __Pyx_GIVEREF(__pyx_tuple__295); /* "View.MemoryView":288 * cdef generic = Enum("") * cdef strided = Enum("") # default * cdef indirect = Enum("") # <<<<<<<<<<<<<< * * */ __pyx_tuple__296 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__296)) __PYX_ERR(3, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__296); __Pyx_GIVEREF(__pyx_tuple__296); /* "View.MemoryView":291 * * * cdef contiguous = Enum("") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("") * */ __pyx_tuple__297 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__297)) __PYX_ERR(3, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__297); __Pyx_GIVEREF(__pyx_tuple__297); /* "View.MemoryView":292 * * cdef contiguous = Enum("") * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< * * */ __pyx_tuple__298 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__298)) __PYX_ERR(3, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__298); __Pyx_GIVEREF(__pyx_tuple__298); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__299 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__299)) __PYX_ERR(3, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__299); __Pyx_GIVEREF(__pyx_tuple__299); __pyx_codeobj__143 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__299, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__143)) __PYX_ERR(3, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_float_0_ = PyFloat_FromDouble(0.); if (unlikely(!__pyx_float_0_)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_float_1_ = PyFloat_FromDouble(1.); if (unlikely(!__pyx_float_1_)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_float_2_ = PyFloat_FromDouble(2.); if (unlikely(!__pyx_float_2_)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_float_4_ = PyFloat_FromDouble(4.); if (unlikely(!__pyx_float_4_)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_float_6_ = PyFloat_FromDouble(6.); if (unlikely(!__pyx_float_6_)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_float_0_0 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_float_0_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_float_1_0 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_float_1_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_float_20_ = PyFloat_FromDouble(20.); if (unlikely(!__pyx_float_20_)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_float_10_0 = PyFloat_FromDouble(10.0); if (unlikely(!__pyx_float_10_0)) __PYX_ERR(0, 1, 
__pyx_L1_error) __pyx_float_1eneg_8 = PyFloat_FromDouble(1e-8); if (unlikely(!__pyx_float_1eneg_8)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_float_0_9999 = PyFloat_FromDouble(0.9999); if (unlikely(!__pyx_float_0_9999)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_float_neg_20_ = PyFloat_FromDouble(-20.); if (unlikely(!__pyx_float_neg_20_)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_5 = PyInt_FromLong(5); if (unlikely(!__pyx_int_5)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_20 = PyInt_FromLong(20); if (unlikely(!__pyx_int_20)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ if (__Pyx_ExportFunction("seq2col", (void (*)(void))__pyx_f_5thinc_6neural_3ops_seq2col, "void (float *, float const *, int, int, int)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("backprop_seq2col", (void (*)(void))__pyx_f_5thinc_6neural_3ops_backprop_seq2col, "void (float *, float const *, int, int, int)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("cpu_maxout", (void (*)(void))__pyx_f_5thinc_6neural_3ops_cpu_maxout, "void (float *, int *, float const *, int, int, int)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("cpu_backprop_maxout", (void (*)(void))__pyx_f_5thinc_6neural_3ops_cpu_backprop_maxout, "void (float *, float const *, int const *, int, int, int)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("cpu_mean_pool", 
(void (*)(void))__pyx_f_5thinc_6neural_3ops_cpu_mean_pool, "void (float *, float const *, int const *, int, int, int)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("cpu_backprop_mean_pool", (void (*)(void))__pyx_f_5thinc_6neural_3ops_cpu_backprop_mean_pool, "void (float *, float const *, int const *, int, int, int)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("cpu_max_pool", (void (*)(void))__pyx_f_5thinc_6neural_3ops_cpu_max_pool, "void (float *, int *, float const *, int const *, int, int, int)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ExportFunction("cpu_backprop_max_pool", (void (*)(void))__pyx_f_5thinc_6neural_3ops_cpu_backprop_max_pool, "void (float *, float const *, int const *, int const *, int, int, int)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ if (PyType_Ready(&__pyx_type_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences) < 0) __PYX_ERR(0, 95, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences.tp_dictoffset && __pyx_type_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; } __pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences = &__pyx_type_5thinc_6neural_3ops___pyx_scope_struct__dropout_sequences; if (PyType_Ready(&__pyx_type_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop) < 0) __PYX_ERR(0, 99, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop.tp_dictoffset && __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; } __pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop = &__pyx_type_5thinc_6neural_3ops___pyx_scope_struct_1_wrap_backprop; if (PyType_Ready(&__pyx_type_5thinc_6neural_3ops___pyx_scope_struct_2_dropout) < 0) __PYX_ERR(0, 120, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_2_dropout.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_5thinc_6neural_3ops___pyx_scope_struct_2_dropout.tp_dictoffset && __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_2_dropout.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_2_dropout.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; } __pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct_2_dropout = &__pyx_type_5thinc_6neural_3ops___pyx_scope_struct_2_dropout; if (PyType_Ready(&__pyx_type_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop) < 0) __PYX_ERR(0, 126, __pyx_L1_error) #if PY_VERSION_HEX < 
0x030800B1 __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop.tp_dictoffset && __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; } __pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop = &__pyx_type_5thinc_6neural_3ops___pyx_scope_struct_3_wrap_backprop; if (PyType_Ready(&__pyx_type_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences) < 0) __PYX_ERR(0, 170, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences.tp_dictoffset && __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; } __pyx_ptype_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences = &__pyx_type_5thinc_6neural_3ops___pyx_scope_struct_4_square_sequences; __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(3, 105, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_array.tp_print = 0; #endif if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(3, 105, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(3, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(3, 279, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_MemviewEnum.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(3, 279, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct 
__pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(3, 330, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryview.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(3, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(3, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(3, 965, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryviewslice.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(3, 965, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(3, 965, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __pyx_t_1 = PyImport_ImportModule("cymem.cymem"); if (unlikely(!__pyx_t_1)) __PYX_ERR(4, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_5cymem_5cymem_PyMalloc = __Pyx_ImportType(__pyx_t_1, "cymem.cymem", "PyMalloc", sizeof(struct __pyx_obj_5cymem_5cymem_PyMalloc), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5cymem_5cymem_PyMalloc) __PYX_ERR(4, 4, __pyx_L1_error) __pyx_vtabptr_5cymem_5cymem_PyMalloc = (struct __pyx_vtabstruct_5cymem_5cymem_PyMalloc*)__Pyx_GetVtable(__pyx_ptype_5cymem_5cymem_PyMalloc->tp_dict); if (unlikely(!__pyx_vtabptr_5cymem_5cymem_PyMalloc)) __PYX_ERR(4, 4, __pyx_L1_error) __pyx_ptype_5cymem_5cymem_PyFree = __Pyx_ImportType(__pyx_t_1, "cymem.cymem", "PyFree", sizeof(struct __pyx_obj_5cymem_5cymem_PyFree), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5cymem_5cymem_PyFree) __PYX_ERR(4, 10, __pyx_L1_error) __pyx_vtabptr_5cymem_5cymem_PyFree = (struct __pyx_vtabstruct_5cymem_5cymem_PyFree*)__Pyx_GetVtable(__pyx_ptype_5cymem_5cymem_PyFree->tp_dict); if (unlikely(!__pyx_vtabptr_5cymem_5cymem_PyFree)) __PYX_ERR(4, 10, 
__pyx_L1_error) __pyx_ptype_5cymem_5cymem_Pool = __Pyx_ImportType(__pyx_t_1, "cymem.cymem", "Pool", sizeof(struct __pyx_obj_5cymem_5cymem_Pool), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5cymem_5cymem_Pool) __PYX_ERR(4, 16, __pyx_L1_error) __pyx_vtabptr_5cymem_5cymem_Pool = (struct __pyx_vtabstruct_5cymem_5cymem_Pool*)__Pyx_GetVtable(__pyx_ptype_5cymem_5cymem_Pool->tp_dict); if (unlikely(!__pyx_vtabptr_5cymem_5cymem_Pool)) __PYX_ERR(4, 16, __pyx_L1_error) __pyx_ptype_5cymem_5cymem_Address = __Pyx_ImportType(__pyx_t_1, "cymem.cymem", "Address", sizeof(struct __pyx_obj_5cymem_5cymem_Address), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5cymem_5cymem_Address) __PYX_ERR(4, 28, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyImport_ImportModule("preshed.maps"); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7preshed_4maps_PreshMap = __Pyx_ImportType(__pyx_t_1, "preshed.maps", "PreshMap", sizeof(struct __pyx_obj_7preshed_4maps_PreshMap), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7preshed_4maps_PreshMap) __PYX_ERR(5, 45, __pyx_L1_error) __pyx_vtabptr_7preshed_4maps_PreshMap = (struct __pyx_vtabstruct_7preshed_4maps_PreshMap*)__Pyx_GetVtable(__pyx_ptype_7preshed_4maps_PreshMap->tp_dict); if (unlikely(!__pyx_vtabptr_7preshed_4maps_PreshMap)) __PYX_ERR(5, 45, __pyx_L1_error) __pyx_ptype_7preshed_4maps_PreshMapArray = __Pyx_ImportType(__pyx_t_1, "preshed.maps", "PreshMapArray", sizeof(struct __pyx_obj_7preshed_4maps_PreshMapArray), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7preshed_4maps_PreshMapArray) __PYX_ERR(5, 53, __pyx_L1_error) __pyx_vtabptr_7preshed_4maps_PreshMapArray = (struct __pyx_vtabstruct_7preshed_4maps_PreshMapArray*)__Pyx_GetVtable(__pyx_ptype_7preshed_4maps_PreshMapArray->tp_dict); if (unlikely(!__pyx_vtabptr_7preshed_4maps_PreshMapArray)) __PYX_ERR(5, 53, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(6, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(6, 9, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 206, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore); if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 206, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 229, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 233, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore); if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 242, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Warn); if 
(!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 917, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyImport_ImportModule("thinc.linalg"); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_5thinc_6linalg_Matrix = __Pyx_ImportType(__pyx_t_1, "thinc.linalg", "Matrix", sizeof(struct __pyx_obj_5thinc_6linalg_Matrix), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5thinc_6linalg_Matrix) __PYX_ERR(2, 22, __pyx_L1_error) __pyx_ptype_5thinc_6linalg_Vec = __Pyx_ImportType(__pyx_t_1, "thinc.linalg", "Vec", sizeof(struct __pyx_obj_5thinc_6linalg_Vec), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5thinc_6linalg_Vec) __PYX_ERR(2, 29, __pyx_L1_error) __pyx_vtabptr_5thinc_6linalg_Vec = (struct __pyx_vtabstruct_5thinc_6linalg_Vec*)__Pyx_GetVtable(__pyx_ptype_5thinc_6linalg_Vec->tp_dict); if (unlikely(!__pyx_vtabptr_5thinc_6linalg_Vec)) __PYX_ERR(2, 29, __pyx_L1_error) __pyx_ptype_5thinc_6linalg_VecVec = __Pyx_ImportType(__pyx_t_1, "thinc.linalg", "VecVec", sizeof(struct __pyx_obj_5thinc_6linalg_VecVec), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5thinc_6linalg_VecVec) __PYX_ERR(2, 158, __pyx_L1_error) __pyx_vtabptr_5thinc_6linalg_VecVec = (struct __pyx_vtabstruct_5thinc_6linalg_VecVec*)__Pyx_GetVtable(__pyx_ptype_5thinc_6linalg_VecVec->tp_dict); if (unlikely(!__pyx_vtabptr_5thinc_6linalg_VecVec)) __PYX_ERR(2, 158, __pyx_L1_error) __pyx_ptype_5thinc_6linalg_Mat = __Pyx_ImportType(__pyx_t_1, "thinc.linalg", "Mat", sizeof(struct __pyx_obj_5thinc_6linalg_Mat), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5thinc_6linalg_Mat) __PYX_ERR(2, 249, __pyx_L1_error) __pyx_vtabptr_5thinc_6linalg_Mat = (struct __pyx_vtabstruct_5thinc_6linalg_Mat*)__Pyx_GetVtable(__pyx_ptype_5thinc_6linalg_Mat->tp_dict); if (unlikely(!__pyx_vtabptr_5thinc_6linalg_Mat)) __PYX_ERR(2, 249, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __pyx_t_1 = PyImport_ImportModule("murmurhash.mrmr"); if (!__pyx_t_1) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "hash64", (void (**)(void))&__pyx_f_10murmurhash_4mrmr_hash64, "uint64_t (void *, int, uint64_t)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "hash128_x86", (void (**)(void))&__pyx_f_10murmurhash_4mrmr_hash128_x86, "void (void const *, int, uint32_t, void *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "hash128_x64", (void (**)(void))&__pyx_f_10murmurhash_4mrmr_hash128_x64, "void (void const *, int, uint32_t, void *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_RefNannyFinishContext(); return -1; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else 
#define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initops(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initops(void) #else __Pyx_PyMODINIT_FUNC PyInit_ops(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_ops(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? -1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_ops(PyObject *__pyx_pyinit_module) #endif #endif { __Pyx_TraceDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; static PyThread_type_lock __pyx_t_10[8]; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'ops' has already been imported. 
Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_ops(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("ops", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_thinc__neural__ops) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "thinc.neural.ops")) { if (unlikely(PyDict_SetItemString(modules, "thinc.neural.ops", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); if (unlikely(__Pyx_modinit_function_export_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_variable_import_code(); if (unlikely(__Pyx_modinit_function_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif __Pyx_TraceCall("__Pyx_PyMODINIT_FUNC PyInit_ops(void)", __pyx_f[0], 1, 0, __PYX_ERR(0, 1, __pyx_L1_error)); /* "thinc/neural/ops.pyx":13 * from preshed.maps cimport PreshMap * * import numpy # <<<<<<<<<<<<<< * from numpy import prod * from numpy cimport ndarray */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_numpy, __pyx_t_1) < 0) __PYX_ERR(0, 13, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":14 * * import numpy * from numpy import prod # <<<<<<<<<<<<<< * from numpy cimport ndarray * try: */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_prod); __Pyx_GIVEREF(__pyx_n_s_prod); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_prod); __pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_prod); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_prod, __pyx_t_1) < 0) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":16 * from numpy import prod * from numpy cimport ndarray * try: # <<<<<<<<<<<<<< * # Python >= 3.3 * from collections.abc import Sized */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "thinc/neural/ops.pyx":18 * try: * # Python >= 3.3 * from collections.abc import Sized # <<<<<<<<<<<<<< * except ImportError: * # Python < 3.3 */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 18, __pyx_L2_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_Sized); __Pyx_GIVEREF(__pyx_n_s_Sized); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_Sized); __pyx_t_1 = __Pyx_Import(__pyx_n_s_collections_abc, __pyx_t_2, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18, __pyx_L2_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_Sized); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18, __pyx_L2_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Sized, __pyx_t_2) < 0) __PYX_ERR(0, 18, __pyx_L2_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":16 * from numpy import prod * from numpy cimport ndarray * try: # <<<<<<<<<<<<<< * # Python >= 3.3 * from collections.abc import Sized */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L7_try_end; __pyx_L2_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":19 * # Python >= 3.3 * from collections.abc import Sized * except ImportError: # <<<<<<<<<<<<<< * # Python < 3.3 * from collections import Sized */ __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ImportError); if (__pyx_t_6) { __Pyx_AddTraceback("thinc.neural.ops", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_2, &__pyx_t_7) < 0) __PYX_ERR(0, 19, __pyx_L4_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_2); __Pyx_GOTREF(__pyx_t_7); /* "thinc/neural/ops.pyx":21 * except ImportError: * # Python < 3.3 * from collections import Sized # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_8 = PyList_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 21, __pyx_L4_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_n_s_Sized); __Pyx_GIVEREF(__pyx_n_s_Sized); PyList_SET_ITEM(__pyx_t_8, 0, __pyx_n_s_Sized); __pyx_t_9 = __Pyx_Import(__pyx_n_s_collections, __pyx_t_8, -1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 21, __pyx_L4_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_ImportFrom(__pyx_t_9, __pyx_n_s_Sized); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 21, __pyx_L4_except_error) __Pyx_GOTREF(__pyx_t_8); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Sized, __pyx_t_8) < 0) __PYX_ERR(0, 21, __pyx_L4_except_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L3_exception_handled; } goto __pyx_L4_except_error; __pyx_L4_except_error:; /* "thinc/neural/ops.pyx":16 * from numpy import prod * from numpy cimport ndarray * try: # <<<<<<<<<<<<<< * # Python >= 3.3 * from collections.abc import Sized */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L3_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L7_try_end:; } /* "thinc/neural/ops.pyx":24 * cimport numpy as np * * from . 
import _custom_kernels # <<<<<<<<<<<<<< * from ._aligned_alloc import zeros_aligned * from ..typedefs cimport weight_t */ __pyx_t_7 = PyList_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_n_s_custom_kernels); __Pyx_GIVEREF(__pyx_n_s_custom_kernels); PyList_SET_ITEM(__pyx_t_7, 0, __pyx_n_s_custom_kernels); __pyx_t_2 = __Pyx_Import(__pyx_n_s__144, __pyx_t_7, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_custom_kernels, __pyx_t_7) < 0) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":25 * * from . import _custom_kernels * from ._aligned_alloc import zeros_aligned # <<<<<<<<<<<<<< * from ..typedefs cimport weight_t * from .util import copy_array, get_array_module */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_zeros_aligned); __Pyx_GIVEREF(__pyx_n_s_zeros_aligned); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_zeros_aligned); __pyx_t_7 = __Pyx_Import(__pyx_n_s_aligned_alloc, __pyx_t_2, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_7, __pyx_n_s_zeros_aligned); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_zeros_aligned, __pyx_t_2) < 0) __PYX_ERR(0, 25, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":27 * from ._aligned_alloc import zeros_aligned * from ..typedefs cimport weight_t * from .util import copy_array, get_array_module # <<<<<<<<<<<<<< * from ..linalg cimport VecVec, Vec * */ __pyx_t_7 = PyList_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 27, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_n_s_copy_array); __Pyx_GIVEREF(__pyx_n_s_copy_array); PyList_SET_ITEM(__pyx_t_7, 0, __pyx_n_s_copy_array); __Pyx_INCREF(__pyx_n_s_get_array_module); __Pyx_GIVEREF(__pyx_n_s_get_array_module); PyList_SET_ITEM(__pyx_t_7, 1, __pyx_n_s_get_array_module); __pyx_t_2 = __Pyx_Import(__pyx_n_s_util, __pyx_t_7, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 27, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_copy_array); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 27, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_copy_array, __pyx_t_7) < 0) __PYX_ERR(0, 27, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_get_array_module); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 27, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_get_array_module, __pyx_t_7) < 0) __PYX_ERR(0, 27, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":31 * * from murmurhash.mrmr cimport hash64, hash128_x86, hash128_x64 * from ..compat import integer_types # <<<<<<<<<<<<<< * from . 
import _custom_kernels * */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 31, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_integer_types); __Pyx_GIVEREF(__pyx_n_s_integer_types); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_integer_types); __pyx_t_7 = __Pyx_Import(__pyx_n_s_compat, __pyx_t_2, 2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 31, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_7, __pyx_n_s_integer_types); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 31, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_integer_types, __pyx_t_2) < 0) __PYX_ERR(0, 31, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":32 * from murmurhash.mrmr cimport hash64, hash128_x86, hash128_x64 * from ..compat import integer_types * from . import _custom_kernels # <<<<<<<<<<<<<< * * cimport blis */ __pyx_t_7 = PyList_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 32, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_n_s_custom_kernels); __Pyx_GIVEREF(__pyx_n_s_custom_kernels); PyList_SET_ITEM(__pyx_t_7, 0, __pyx_n_s_custom_kernels); __pyx_t_2 = __Pyx_Import(__pyx_n_s__144, __pyx_t_7, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_custom_kernels); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 32, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_custom_kernels, __pyx_t_7) < 0) __PYX_ERR(0, 32, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":36 * cimport blis * cimport blis.cy * import blis.py # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_blis_py, 0, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 36, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_blis, __pyx_t_2) < 0) __PYX_ERR(0, 36, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":47 * float cosf(float x) nogil * * try: # <<<<<<<<<<<<<< * import cupy * import cupy.cuda */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "thinc/neural/ops.pyx":48 * * try: * import cupy # <<<<<<<<<<<<<< * import cupy.cuda * from cupy.cuda.compiler import compile_with_cache */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_cupy, 0, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 48, __pyx_L10_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_cupy, __pyx_t_2) < 0) __PYX_ERR(0, 48, __pyx_L10_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":49 * try: * import cupy * import cupy.cuda # <<<<<<<<<<<<<< * from cupy.cuda.compiler import compile_with_cache * # We no longer have to set up the memory pool, fortunately. */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_cupy_cuda, 0, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 49, __pyx_L10_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_cupy, __pyx_t_2) < 0) __PYX_ERR(0, 49, __pyx_L10_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":50 * import cupy * import cupy.cuda * from cupy.cuda.compiler import compile_with_cache # <<<<<<<<<<<<<< * # We no longer have to set up the memory pool, fortunately. 
* except ImportError: */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 50, __pyx_L10_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_compile_with_cache); __Pyx_GIVEREF(__pyx_n_s_compile_with_cache); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_compile_with_cache); __pyx_t_7 = __Pyx_Import(__pyx_n_s_cupy_cuda_compiler, __pyx_t_2, -1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 50, __pyx_L10_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_7, __pyx_n_s_compile_with_cache); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 50, __pyx_L10_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_compile_with_cache, __pyx_t_2) < 0) __PYX_ERR(0, 50, __pyx_L10_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":47 * float cosf(float x) nogil * * try: # <<<<<<<<<<<<<< * import cupy * import cupy.cuda */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L15_try_end; __pyx_L10_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":52 * from cupy.cuda.compiler import compile_with_cache * # We no longer have to set up the memory pool, fortunately. * except ImportError: # <<<<<<<<<<<<<< * cupy = None * */ __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ImportError); if (__pyx_t_6) { __Pyx_AddTraceback("thinc.neural.ops", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_2, &__pyx_t_1) < 0) __PYX_ERR(0, 52, __pyx_L12_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_2); __Pyx_GOTREF(__pyx_t_1); /* "thinc/neural/ops.pyx":53 * # We no longer have to set up the memory pool, fortunately. 
* except ImportError: * cupy = None # <<<<<<<<<<<<<< * * */ if (PyDict_SetItem(__pyx_d, __pyx_n_s_cupy, Py_None) < 0) __PYX_ERR(0, 53, __pyx_L12_except_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L11_exception_handled; } goto __pyx_L12_except_error; __pyx_L12_except_error:; /* "thinc/neural/ops.pyx":47 * float cosf(float x) nogil * * try: # <<<<<<<<<<<<<< * import cupy * import cupy.cuda */ __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L11_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L15_try_end:; } /* "thinc/neural/ops.pyx":57 * * * class Ops(object): # <<<<<<<<<<<<<< * device = 'cpu' * xp = None */ __pyx_t_1 = __Pyx_CalculateMetaclass(NULL, __pyx_tuple__145); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 57, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_Py3MetaclassPrepare(__pyx_t_1, __pyx_tuple__145, __pyx_n_s_Ops, __pyx_n_s_Ops, (PyObject *) NULL, __pyx_n_s_thinc_neural_ops, (PyObject *) NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 57, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "thinc/neural/ops.pyx":58 * * class Ops(object): * device = 'cpu' # <<<<<<<<<<<<<< * xp = None * */ if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_device, __pyx_n_s_cpu) < 0) __PYX_ERR(0, 58, __pyx_L1_error) /* "thinc/neural/ops.pyx":59 * class Ops(object): * device = 'cpu' * xp = None # <<<<<<<<<<<<<< * * def __init__(self, xp=None): */ if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_xp, Py_None) < 0) __PYX_ERR(0, 59, __pyx_L1_error) /* "thinc/neural/ops.pyx":61 * xp = None * * def __init__(self, xp=None): # <<<<<<<<<<<<<< * if xp is not None: * self.xp = xp */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_1__init__, 0, __pyx_n_s_Ops___init, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj_)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__147); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_init, __pyx_t_7) < 0) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":65 * self.xp = xp * * def seq2col(self, seq, int nW): # <<<<<<<<<<<<<< * '''Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence. 
* The new sequence is constructed by concatenating nW preceding and succeeding */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_3seq2col, 0, __pyx_n_s_Ops_seq2col, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__2)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_seq2col, __pyx_t_7) < 0) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":81 * return cols.reshape((B, I * (2*nW+1))) * * def backprop_seq2col(self, dY, int nW): # <<<<<<<<<<<<<< * # This is a test implementation that only supports nW=1 * assert nW == 1 */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_5backprop_seq2col, 0, __pyx_n_s_Ops_backprop_seq2col, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__4)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_backprop_seq2col, __pyx_t_7) < 0) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":95 * return dX * * def dropout_sequences(self, X, dropout, inplace=False): # <<<<<<<<<<<<<< * if dropout is None or dropout <= 0.0: * return X, lambda func: func */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_7dropout_sequences, 0, __pyx_n_s_Ops_dropout_sequences, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__7)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 95, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__151); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_dropout_sequences, __pyx_t_7) < 0) __PYX_ERR(0, 95, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":120 * return masked, wrap_backprop * * def dropout(self, x, dropout, inplace=False): # <<<<<<<<<<<<<< * if dropout is None or dropout <= 0.0: * return x, lambda func: func */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_9dropout, 0, __pyx_n_s_Ops_dropout, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__12)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 120, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__153); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_dropout, __pyx_t_7) < 0) __PYX_ERR(0, 120, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":136 * return x * mask, wrap_backprop * * def flatten(self, X, dtype=None, pad=0): # <<<<<<<<<<<<<< * if X is None or len(X) == 0: * return self.allocate((0,), dtype=dtype or 'f') */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_11flatten, 0, __pyx_n_s_Ops_flatten, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__15)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__155); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_flatten, __pyx_t_7) < 0) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":155 * return result * * def unflatten(self, X, lengths, pad=0): # <<<<<<<<<<<<<< * unflat = [] * pad = int(pad) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_13unflatten, 0, __pyx_n_s_Ops_unflatten, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__19)); if (unlikely(!__pyx_t_7)) 
__PYX_ERR(0, 155, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__157); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_unflatten, __pyx_t_7) < 0) __PYX_ERR(0, 155, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":170 * return unflat * * def square_sequences(self, seqs): # <<<<<<<<<<<<<< * '''Sort a batch of sequence by decreasing length, pad, and transpose * so that the outer dimension is the timestep. Return the padded batch, */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_15square_sequences, 0, __pyx_n_s_Ops_square_sequences, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__21)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_square_sequences, __pyx_t_7) < 0) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":207 * @cython.boundscheck(False) * @cython.wraparound(False) * def get_dropout_mask(self, shape, drop): # <<<<<<<<<<<<<< * if drop is None or drop <= 0: * return None */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_17get_dropout_mask, 0, __pyx_n_s_Ops_get_dropout_mask, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__24)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_get_dropout_mask, __pyx_t_7) < 0) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":216 * return self.asarray(mask, dtype='float32') * * def allocate(self, shape, dtype='float32'): # <<<<<<<<<<<<<< * if isinstance(shape, integer_types): * shape = (shape,) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_19allocate, 0, __pyx_n_s_Ops_allocate, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__25)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 216, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__161); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_allocate, __pyx_t_7) < 0) __PYX_ERR(0, 216, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":222 * return self.xp.zeros(shape, dtype=dtype) * * def unzip(self, data): # <<<<<<<<<<<<<< * X, y = zip(*data) * return self.asarray(X), self.asarray(y) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_21unzip, 0, __pyx_n_s_Ops_unzip, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_unzip, __pyx_t_7) < 0) __PYX_ERR(0, 222, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":226 * return self.asarray(X), self.asarray(y) * * def asarray(self, data, dtype=None): # <<<<<<<<<<<<<< * if isinstance(data, self.xp.ndarray): * if dtype is not None: */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_23asarray, 0, __pyx_n_s_Ops_asarray, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__27)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__164); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_asarray, __pyx_t_7) < 0) __PYX_ERR(0, 226, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* 
"thinc/neural/ops.pyx":240 * return self.xp.array(data) * * def batch_dot(self, x, y, transpose=False): # <<<<<<<<<<<<<< * # TODO: Fix this confusing inversion =/ * if not transpose: */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_25batch_dot, 0, __pyx_n_s_Ops_batch_dot, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__28)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__166); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_batch_dot, __pyx_t_7) < 0) __PYX_ERR(0, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":247 * return self.xp.dot(x, y) * * def add_batch_outer(self, output, x, y): # <<<<<<<<<<<<<< * # TODO: Deprecate this * output += self.xp.tensordot(x, y, axes=[[0], [0]]) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_27add_batch_outer, 0, __pyx_n_s_Ops_add_batch_outer, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__29)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_add_batch_outer, __pyx_t_7) < 0) __PYX_ERR(0, 247, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":251 * output += self.xp.tensordot(x, y, axes=[[0], [0]]) * * def norm(self, x): # <<<<<<<<<<<<<< * return self.xp.sqrt((x * x).sum()) * */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_29norm, 0, __pyx_n_s_Ops_norm, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__30)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_norm, __pyx_t_7) < 0) __PYX_ERR(0, 251, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":254 * return self.xp.sqrt((x * x).sum()) * * def dot(self, x, y): # <<<<<<<<<<<<<< * # TODO: Deprecate this * return self.xp.dot(x, y) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_31dot, 0, __pyx_n_s_Ops_dot, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__31)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_dot, __pyx_t_7) < 0) __PYX_ERR(0, 254, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":258 * return self.xp.dot(x, y) * * def affine(self, weights, bias, signal): # <<<<<<<<<<<<<< * return self.gemm(signal, weights, trans2=True) + bias * */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_33affine, 0, __pyx_n_s_Ops_affine, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__32)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_affine, __pyx_t_7) < 0) __PYX_ERR(0, 258, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":261 * return self.gemm(signal, weights, trans2=True) + bias * * def add_sum(self, out, to_sum): # <<<<<<<<<<<<<< * out += to_sum.sum(axis=0) * */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_35add_sum, 0, __pyx_n_s_Ops_add_sum, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__33)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 261, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_add_sum, __pyx_t_7) < 0) 
__PYX_ERR(0, 261, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":264 * out += to_sum.sum(axis=0) * * def argmax(self, x, axis=-1): # <<<<<<<<<<<<<< * return self.xp.argmax(x, axis=axis) * */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_37argmax, 0, __pyx_n_s_Ops_argmax, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__34)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__173); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_argmax, __pyx_t_7) < 0) __PYX_ERR(0, 264, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":267 * return self.xp.argmax(x, axis=axis) * * def sigmoid(self, X): # <<<<<<<<<<<<<< * return 1./(1. + self.xp.exp(-X)) * */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_39sigmoid, 0, __pyx_n_s_Ops_sigmoid, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__35)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 267, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_sigmoid, __pyx_t_7) < 0) __PYX_ERR(0, 267, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":270 * return 1./(1. + self.xp.exp(-X)) * * def dsigmoid(self, y): # <<<<<<<<<<<<<< * return y*(1-y) * */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_41dsigmoid, 0, __pyx_n_s_Ops_dsigmoid, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__36)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 270, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_dsigmoid, __pyx_t_7) < 0) __PYX_ERR(0, 270, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":273 * return y*(1-y) * * def dtanh(self, y): # <<<<<<<<<<<<<< * return 1-y**2 * */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_43dtanh, 0, __pyx_n_s_Ops_dtanh, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__37)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_dtanh, __pyx_t_7) < 0) __PYX_ERR(0, 273, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":276 * return 1-y**2 * * def softmax(self, x, inplace=False, axis=-1): # <<<<<<<<<<<<<< * shape = x.shape * maxes = self.xp.max(x, axis=axis, keepdims=True) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_45softmax, 0, __pyx_n_s_Ops_softmax, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__38)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__178); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_softmax, __pyx_t_7) < 0) __PYX_ERR(0, 276, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":288 * return new_x * * def softmax_sequences(self, Xs, lengths, inplace=False, axis=-1): # <<<<<<<<<<<<<< * if Xs.ndim >= 3: * raise NotImplementedError( */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_47softmax_sequences, 0, __pyx_n_s_Ops_softmax_sequences, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__39)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__180); if 
(__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_softmax_sequences, __pyx_t_7) < 0) __PYX_ERR(0, 288, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":303 * return new_x * * def backprop_softmax(self, Y, dY, axis=-1): # <<<<<<<<<<<<<< * dX = Y * dY * dX -= Y * dX.sum(axis=axis, keepdims=True) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_49backprop_softmax, 0, __pyx_n_s_Ops_backprop_softmax, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__40)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__182); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_backprop_softmax, __pyx_t_7) < 0) __PYX_ERR(0, 303, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":308 * return dX * * def backprop_softmax_sequences(self, dy, y, lengths): # <<<<<<<<<<<<<< * dx = y * dy * sumdx = self.backprop_sum_pool(self.sum_pool(dx, lengths), lengths) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_51backprop_softmax_sequences, 0, __pyx_n_s_Ops_backprop_softmax_sequences, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__41)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_backprop_softmax_sequences, __pyx_t_7) < 0) __PYX_ERR(0, 308, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":314 * return dx * * def expand_dims(self, a, axis=-1): # <<<<<<<<<<<<<< * return self.xp.expand_dims(a, axis=axis) * */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_53expand_dims, 0, __pyx_n_s_Ops_expand_dims, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__42)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__185); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_expand_dims, __pyx_t_7) < 0) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":317 * return self.xp.expand_dims(a, axis=axis) * * def clip_low(self, x, value, inplace=False): # <<<<<<<<<<<<<< * if inplace: * return self.xp.maximum(x, value, out=x) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_55clip_low, 0, __pyx_n_s_Ops_clip_low, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__43)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 317, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__187); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_clip_low, __pyx_t_7) < 0) __PYX_ERR(0, 317, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":323 * return self.xp.maximum(x, value) * * def take_which(self, x, which, axis=-1): # <<<<<<<<<<<<<< * output = self.allocate(which.shape) * for i in range(x.shape[axis]): */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_57take_which, 0, __pyx_n_s_Ops_take_which, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__44)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 323, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__189); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_take_which, __pyx_t_7) < 0) __PYX_ERR(0, 323, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":329 * return output * 
* def backprop_take(self, dX__bo, which__bo, nP): # <<<<<<<<<<<<<< * dX__bop = self.allocate((dX__bo.shape[0], dX__bo.shape[1], nP)) * for i in range(nP): */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_59backprop_take, 0, __pyx_n_s_Ops_backprop_take, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__45)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 329, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_backprop_take, __pyx_t_7) < 0) __PYX_ERR(0, 329, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":335 * return dX__bop * * def lstm(self, output, cells, act_pieces, prev): # <<<<<<<<<<<<<< * hf, hi, ho, hc = act_pieces * hf[:] = self.sigmoid(hf) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_61lstm, 0, __pyx_n_s_Ops_lstm, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__46)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 335, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_lstm, __pyx_t_7) < 0) __PYX_ERR(0, 335, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":345 * output[:] = self.xp.tanh(cells) * ho * * def backprop_lstm(self, d_cells, d_prev, d_gate_pieces, # <<<<<<<<<<<<<< * d_output, gate_pieces, cells, prev): * hf, hi, ho, hc = gate_pieces */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_63backprop_lstm, 0, __pyx_n_s_Ops_backprop_lstm, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__47)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 345, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_backprop_lstm, __pyx_t_7) < 0) __PYX_ERR(0, 345, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":366 * copy_array(d_cells, dc) * * def softplus(self, X, threshold=20., out=None): # <<<<<<<<<<<<<< * xp = get_array_module(X) * log1p_exp = xp.log1p(xp.exp(X)) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_65softplus, 0, __pyx_n_s_Ops_softplus, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__48)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 366, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__194); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_softplus, __pyx_t_7) < 0) __PYX_ERR(0, 366, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":377 * return out * * def backprop_softplus(self, dY, X, threshold=20, out=None): # <<<<<<<<<<<<<< * xp = get_array_module(X) * if out is None: */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_67backprop_softplus, 0, __pyx_n_s_Ops_backprop_softplus, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__49)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 377, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__196); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_backprop_softplus, __pyx_t_7) < 0) __PYX_ERR(0, 377, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":387 * return out * * def mish(self, X, threshold=20., out=None): # <<<<<<<<<<<<<< * Xsoft = self.softplus(X, threshold=threshold, out=out) * Y = self.xp.tanh(Xsoft, out=out) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_69mish, 0, __pyx_n_s_Ops_mish, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__50)); 
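/* The softplus/mish registrations above and below together implement the Mish
   activation, mish(x) = x * tanh(softplus(x)), where softplus(x) = log(1 + exp(x))
   and, as suggested by the `X < threshold` mask in backprop_mish in the quoted
   source, inputs above `threshold` pass through unchanged. A minimal NumPy-level
   sketch of the same computation, assuming a NumPy-like module `xp`; the helper
   name `mish_ref` is illustrative only and not part of the generated module:

       def mish_ref(xp, X, threshold=20.0):
           # softplus, clamped to the identity above the threshold (assumption
           # based on the X < threshold mask in the quoted backprop_mish source)
           Xsoft = xp.log1p(xp.exp(xp.minimum(X, threshold)))
           Xsoft = xp.where(X < threshold, Xsoft, X)
           return X * xp.tanh(Xsoft)
*/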
if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 387, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__198); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_mish, __pyx_t_7) < 0) __PYX_ERR(0, 387, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":393 * return Y * * def backprop_mish(self, dY, X, threshold=20, out=None): # <<<<<<<<<<<<<< * xp = get_array_module(X) * indices = X < threshold */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_71backprop_mish, 0, __pyx_n_s_Ops_backprop_mish, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__51)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__200); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_backprop_mish, __pyx_t_7) < 0) __PYX_ERR(0, 393, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":412 * return out * * def xavier_uniform_init(self, W, inplace=True): # <<<<<<<<<<<<<< * if (W**2).sum() != 0.: * return W */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_73xavier_uniform_init, 0, __pyx_n_s_Ops_xavier_uniform_init, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__52)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 412, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__202); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_xavier_uniform_init, __pyx_t_7) < 0) __PYX_ERR(0, 412, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":422 * return self.xp.random.uniform(-scale, scale, W.shape) * * def normal_init(self, W, fan_in, inplace=True): # <<<<<<<<<<<<<< * if (W**2).sum() != 0.: * return W */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_75normal_init, 0, __pyx_n_s_Ops_normal_init, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__53)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__204); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_normal_init, __pyx_t_7) < 0) __PYX_ERR(0, 422, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":434 * return inits * * def he_normal_init(self, shape, fan_in): # <<<<<<<<<<<<<< * scale = self.xp.sqrt(2. 
/ fan_in) * return self.xp.random.normal(scale=scale, size=prod(shape)).reshape(shape) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_77he_normal_init, 0, __pyx_n_s_Ops_he_normal_init, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__54)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 434, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_he_normal_init, __pyx_t_7) < 0) __PYX_ERR(0, 434, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":438 * return self.xp.random.normal(scale=scale, size=prod(shape)).reshape(shape) * * def update_averages(self, ema, weights, t, max_decay=0.9999): # <<<<<<<<<<<<<< * cdef weight_t decay = (1.0 + t) / (10.0 + t) * if decay > max_decay: */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_79update_averages, 0, __pyx_n_s_Ops_update_averages, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__55)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 438, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__207); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_update_averages, __pyx_t_7) < 0) __PYX_ERR(0, 438, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":444 * ema -= (1-decay) * (ema - weights) * * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, # <<<<<<<<<<<<<< * learn_rate, mod_rate=1.): * mom1 *= beta1 */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_81adam, 0, __pyx_n_s_Ops_adam, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__56)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 444, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_7, __pyx_tuple__209); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_adam, __pyx_t_7) < 0) __PYX_ERR(0, 444, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":455 * gradient.fill(0) * * def clip_gradient(self, gradient, threshold): # <<<<<<<<<<<<<< * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_83clip_gradient, 0, __pyx_n_s_Ops_clip_gradient, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__57)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 455, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_clip_gradient, __pyx_t_7) < 0) __PYX_ERR(0, 455, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":461 * gradient *= threshold / grad_norm * * def logloss(self, y_true, y_pred): # <<<<<<<<<<<<<< * log_yp = self.xp.log(y_pred + 1e-8) * loss = (y_true * log_yp) + (1-y_true) * self.xp.log((1-y_pred)+1e-8) */ __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_3Ops_85logloss, 0, __pyx_n_s_Ops_logloss, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__58)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 461, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_logloss, __pyx_t_7) < 0) __PYX_ERR(0, 461, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "thinc/neural/ops.pyx":57 * * * class Ops(object): # <<<<<<<<<<<<<< * device = 'cpu' * xp = None */ __pyx_t_7 = __Pyx_Py3ClassCreate(__pyx_t_1, __pyx_n_s_Ops, __pyx_tuple__145, __pyx_t_2, NULL, 0, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 57, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, 
__pyx_n_s_Ops, __pyx_t_7) < 0) __PYX_ERR(0, 57, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":467 * * * class NumpyOps(Ops): # <<<<<<<<<<<<<< * device = 'cpu' * xp = numpy */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Ops); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 467, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 467, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_CalculateMetaclass(NULL, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 467, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = __Pyx_Py3MetaclassPrepare(__pyx_t_1, __pyx_t_2, __pyx_n_s_NumpyOps, __pyx_n_s_NumpyOps, (PyObject *) NULL, __pyx_n_s_thinc_neural_ops, (PyObject *) NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 467, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); /* "thinc/neural/ops.pyx":468 * * class NumpyOps(Ops): * device = 'cpu' # <<<<<<<<<<<<<< * xp = numpy * */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_device, __pyx_n_s_cpu) < 0) __PYX_ERR(0, 468, __pyx_L1_error) /* "thinc/neural/ops.pyx":469 * class NumpyOps(Ops): * device = 'cpu' * xp = numpy # <<<<<<<<<<<<<< * * def asarray(self, data, dtype=None): */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_numpy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 469, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_xp, __pyx_t_9) < 0) __PYX_ERR(0, 469, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":471 * xp = numpy * * def asarray(self, data, dtype=None): # <<<<<<<<<<<<<< * if isinstance(data, self.xp.ndarray): * if dtype is not None: */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_1asarray, 0, __pyx_n_s_NumpyOps_asarray, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__59)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 471, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__213); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_asarray, __pyx_t_9) < 0) __PYX_ERR(0, 471, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":488 * * * def allocate(self, shape, dtype='float32'): # <<<<<<<<<<<<<< * if isinstance(shape, integer_types): * shape = (shape,) */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_3allocate, 0, __pyx_n_s_NumpyOps_allocate, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__60)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__215); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_allocate, __pyx_t_9) < 0) __PYX_ERR(0, 488, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":493 * return self.xp.zeros(shape, dtype=dtype) * * def inplace_add(self, np.ndarray x, np.ndarray y, float scale=1.0): # <<<<<<<<<<<<<< * VecVec.add_i(x.data, * y.data, scale, x.shape[0]) */ __pyx_t_9 = PyFloat_FromDouble(((double)1.0)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 493, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 493, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = 
__Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_5inplace_add, 0, __pyx_n_s_NumpyOps_inplace_add, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__61)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 493, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_t_8); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_inplace_add, __pyx_t_9) < 0) __PYX_ERR(0, 493, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":497 * y.data, scale, x.shape[0]) * * def matmul(self, float[:, :, ::1] x, float[:, :, ::1] y, out=None): # <<<<<<<<<<<<<< * assert x.shape[0] == y.shape[0] * assert x.shape[2] == y.shape[1] */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_7matmul, 0, __pyx_n_s_NumpyOps_matmul, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__62)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 497, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__218); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_matmul, __pyx_t_9) < 0) __PYX_ERR(0, 497, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":512 * return out_array * * def gemm(self, const float[:, ::1] x, const float[:, ::1] y, trans1=False, trans2=False, # <<<<<<<<<<<<<< * out=None): * cdef int m */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_9gemm, 0, __pyx_n_s_NumpyOps_gemm, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__63)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__220); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_gemm, __pyx_t_9) < 0) __PYX_ERR(0, 512, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":534 * return out_array * * def affine(self, weights, bias, signal): # <<<<<<<<<<<<<< * dotted = self.gemm(signal, weights, trans2=True) * dotted += bias */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_11affine, 0, __pyx_n_s_NumpyOps_affine, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__64)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 534, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_affine, __pyx_t_9) < 0) __PYX_ERR(0, 534, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":539 * return dotted * * def elu(self, ndarray X, inplace=True): # <<<<<<<<<<<<<< * cdef weight_t* data = X.data * cdef size_t size = X.size */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_13elu, 0, __pyx_n_s_NumpyOps_elu, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__65)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 539, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__223); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_elu, __pyx_t_9) < 0) __PYX_ERR(0, 539, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":546 * data[i] = expf(data[i])-1. 
* * def selu(self, ndarray X, inplace=True): # <<<<<<<<<<<<<< * cdef weight_t* data = X.data * cdef size_t size = X.size */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_15selu, 0, __pyx_n_s_NumpyOps_selu, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__66)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 546, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__225); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_selu, __pyx_t_9) < 0) __PYX_ERR(0, 546, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":556 * data[i] *= scale * * def backprop_selu(self, ndarray delta_, ndarray signal_in_, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the SELU transformation */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_17backprop_selu, 0, __pyx_n_s_NumpyOps_backprop_selu, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__67)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 556, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__227); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_selu, __pyx_t_9) < 0) __PYX_ERR(0, 556, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":570 * delta[i] *= alpha * expf(signal_in[i]) * * def backprop_elu(self, ndarray delta_, ndarray signal_out_, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the ELU transformation */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_19backprop_elu, 0, __pyx_n_s_NumpyOps_backprop_elu, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__68)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__229); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_elu, __pyx_t_9) < 0) __PYX_ERR(0, 570, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":582 * delta[i] *= signal_out[i] + 1. 
* * def relu(self, ndarray X, inplace=False): # <<<<<<<<<<<<<< * cdef np.ndarray out = X if inplace else X.copy() * cdef weight_t* data = out.data */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_21relu, 0, __pyx_n_s_NumpyOps_relu, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__69)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 582, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__231); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_relu, __pyx_t_9) < 0) __PYX_ERR(0, 582, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":591 * return out * * def backprop_relu(self, ndarray dY, ndarray Y, inplace=False): # <<<<<<<<<<<<<< * cdef np.ndarray dX = dY if inplace else dY.copy() * cdef size_t size = dX.size */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_23backprop_relu, 0, __pyx_n_s_NumpyOps_backprop_relu, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__70)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__233); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_relu, __pyx_t_9) < 0) __PYX_ERR(0, 591, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":601 * return dX * * def maxout(self, const float[:, :, ::1] py_cands): # <<<<<<<<<<<<<< * cdef Pool mem = Pool() * cdef int B = py_cands.shape[0] */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_25maxout, 0, __pyx_n_s_NumpyOps_maxout, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__71)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 601, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_maxout, __pyx_t_9) < 0) __PYX_ERR(0, 601, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":613 * return best, which * * def backprop_maxout(self, const float[:, ::1] dX__bo, int[:, ::1] which__bo, int P): # <<<<<<<<<<<<<< * cdef int B = dX__bo.shape[0] * cdef int O = dX__bo.shape[1] */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_27backprop_maxout, 0, __pyx_n_s_NumpyOps_backprop_maxout, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__72)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_maxout, __pyx_t_9) < 0) __PYX_ERR(0, 613, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":622 * return dX__bop * * def mish(self, const float[:, ::1] X, threshold=5, out=None): # <<<<<<<<<<<<<< * shape = [X.shape[i] for i in range(X.ndim)] * cdef np.ndarray Y = self.allocate(tuple(shape), dtype="f") */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_29mish, 0, __pyx_n_s_NumpyOps_mish, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__73)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 622, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__237); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_mish, __pyx_t_9) < 0) __PYX_ERR(0, 622, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":633 * return Y * * def backprop_mish(self, const float[:, ::1] dY, const float[:, ::1] X, # <<<<<<<<<<<<<< * threshold=5, out=None): * shape = [X.shape[i] for i in range(X.ndim)] */ __pyx_t_9 = 
__Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_31backprop_mish, 0, __pyx_n_s_NumpyOps_backprop_mish, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__74)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 633, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__239); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_mish, __pyx_t_9) < 0) __PYX_ERR(0, 633, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":658 * # cells.shape[0], cells.shape[1]) * * def seq2col(self, const float[:, ::1] seq, int nW): # <<<<<<<<<<<<<< * '''Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence. * The new sequence is constructed by concatenating nW preceding and succeeding */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_33seq2col, 0, __pyx_n_s_NumpyOps_seq2col, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__75)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_seq2col, __pyx_t_9) < 0) __PYX_ERR(0, 658, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":669 * return cols * * def backprop_seq2col(self, const float[:, ::1] dY, int nW): # <<<<<<<<<<<<<< * cdef int B = dY.shape[0] * cdef int nF = nW*2+1 */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_35backprop_seq2col, 0, __pyx_n_s_NumpyOps_backprop_seq2col, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__76)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 669, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_seq2col, __pyx_t_9) < 0) __PYX_ERR(0, 669, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":677 * return dX * * def remap_ids(self, PreshMap mapping, uint64_t[::1] ids_mv, uint64_t value=0): # <<<<<<<<<<<<<< * cdef uint64_t* ids = &ids_mv[0] * cdef ndarray[uint64_t] output_arr = self.allocate(len(ids_mv), dtype='uint64') */ __pyx_t_9 = __Pyx_PyInt_From_uint64_t(((uint64_t)0)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_37remap_ids, 0, __pyx_n_s_NumpyOps_remap_ids, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__77)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_t_8); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_remap_ids, __pyx_t_9) < 0) __PYX_ERR(0, 677, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":696 * return output_arr * * def increment_slices(self, ndarray contig_array, ndarray _to_add, _starts): # <<<<<<<<<<<<<< * cdef ndarray contig_to_add = self.xp.ascontiguousarray(_to_add, dtype='float32') * cdef ndarray contig_starts = self.xp.ascontiguousarray(_starts, dtype='int32') */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_39increment_slices, 0, __pyx_n_s_NumpyOps_increment_slices, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__78)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 696, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_increment_slices, __pyx_t_9) < 0) __PYX_ERR(0, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":713 * @cython.boundscheck(False) * @cython.wraparound(False) * def hash(self, const uint64_t[::1] ids, uint32_t seed): # <<<<<<<<<<<<<< * '''Hash a sequence of 64-bit keys into a table with 4 32-bit keys''' * # Written to mirror the GPU implementation */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_41hash, 0, __pyx_n_s_NumpyOps_hash, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__79)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 713, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_hash, __pyx_t_9) < 0) __PYX_ERR(0, 713, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":731 * return keys * * def mean_pool(self, const float[:, ::1] X, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_43mean_pool, 0, __pyx_n_s_NumpyOps_mean_pool, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__80)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 731, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_mean_pool, __pyx_t_9) < 0) __PYX_ERR(0, 731, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":743 * return cpu_floats_ptr2array(means, (B, O)) * * def sum_pool(self, const float[:, ::1] X, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_45sum_pool, 0, __pyx_n_s_NumpyOps_sum_pool, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__81)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 743, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_sum_pool, __pyx_t_9) < 0) __PYX_ERR(0, 743, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":755 * return cpu_floats_ptr2array(sums, (B, O)) * * def backprop_mean_pool(self, const float[:, ::1] d_means, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = d_means.shape[1] */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_47backprop_mean_pool, 0, __pyx_n_s_NumpyOps_backprop_mean_pool, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__82)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 755, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_mean_pool, __pyx_t_9) < 0) __PYX_ERR(0, 755, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":769 * return cpu_floats_ptr2array(dX, (T, O)) * * def backprop_sum_pool(self, const float[:, ::1] d_sums, int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = d_sums.shape[1] */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_49backprop_sum_pool, 0, __pyx_n_s_NumpyOps_backprop_sum_pool, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__83)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 769, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_sum_pool, __pyx_t_9) < 0) __PYX_ERR(0, 769, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":783 * 
* * def max_pool(self, const float[:, ::1] X, const int[::1] lengths): # <<<<<<<<<<<<<< * cdef int B = lengths.shape[0] * cdef int O = X.shape[1] */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_51max_pool, 0, __pyx_n_s_NumpyOps_max_pool, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__84)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_max_pool, __pyx_t_9) < 0) __PYX_ERR(0, 783, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":799 * return py_best, py_which * * def backprop_max_pool(self, const float[:, ::1] d_maxes, # <<<<<<<<<<<<<< * const int[:, ::1] which, const int[::1] lengths): * cdef int B = lengths.shape[0] */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_53backprop_max_pool, 0, __pyx_n_s_NumpyOps_backprop_max_pool, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__85)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_max_pool, __pyx_t_9) < 0) __PYX_ERR(0, 799, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":814 * return cpu_floats_ptr2array(dX, (T, O)) * * def add_sum(self, np.ndarray out, np.ndarray to_sum): # <<<<<<<<<<<<<< * VecVec.batch_add_i(out.data, * to_sum.data, 1., to_sum.shape[1], to_sum.shape[0]) */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_55add_sum, 0, __pyx_n_s_NumpyOps_add_sum, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__86)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 814, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_add_sum, __pyx_t_9) < 0) __PYX_ERR(0, 814, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":818 * to_sum.data, 1., to_sum.shape[1], to_sum.shape[0]) * * def scatter_add(self, np.ndarray out, np.ndarray ids, np.ndarray inputs): # <<<<<<<<<<<<<< * if out.dtype == 'float32' \ * and ids.dtype == 'int32' \ */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_57scatter_add, 0, __pyx_n_s_NumpyOps_scatter_add, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__87)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 818, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_scatter_add, __pyx_t_9) < 0) __PYX_ERR(0, 818, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":840 * def adam(self, float[::1] weights, float[::1] gradient, float[::1] mom1, * float[::1] mom2, const float beta1, const float beta2, float eps, * float learn_rate, float mod_rate=1.): # <<<<<<<<<<<<<< * _adam_momentum(&gradient[0], &mom1[0], &mom2[0], * weights.shape[0], beta1, beta2, eps, learn_rate) */ __pyx_t_9 = PyFloat_FromDouble(((double)1.)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); /* "thinc/neural/ops.pyx":838 * @cython.boundscheck(False) * @cython.wraparound(False) * def adam(self, float[::1] weights, float[::1] gradient, float[::1] mom1, # <<<<<<<<<<<<<< * float[::1] mom2, const float beta1, const float beta2, float eps, * float learn_rate, float mod_rate=1.): */ __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 838, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = 
__Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_59adam, 0, __pyx_n_s_NumpyOps_adam, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__88)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 838, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_t_8); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_adam, __pyx_t_9) < 0) __PYX_ERR(0, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":847 * memset(&gradient[0], 0, gradient.size * sizeof(float)) * * def ngrams(self, int n, const uint64_t[::1] keys_): # <<<<<<<<<<<<<< * keys = &keys_[0] * length = max(0, keys_.shape[0]-n) */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_61ngrams, 0, __pyx_n_s_NumpyOps_ngrams, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__89)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 847, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_ngrams, __pyx_t_9) < 0) __PYX_ERR(0, 847, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":856 * return output_ * * def position_encode(self, int N, int D, int period=10000, out=None): # <<<<<<<<<<<<<< * cdef np.ndarray out_ * if out is None: */ __pyx_t_9 = __Pyx_PyInt_From_int(((int)0x2710)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_9); __Pyx_INCREF(((PyObject *)Py_None)); __Pyx_GIVEREF(((PyObject *)Py_None)); PyTuple_SET_ITEM(__pyx_t_8, 1, ((PyObject *)Py_None)); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_8NumpyOps_63position_encode, 0, __pyx_n_s_NumpyOps_position_encode, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__90)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_t_8); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_position_encode, __pyx_t_9) < 0) __PYX_ERR(0, 856, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":467 * * * class NumpyOps(Ops): # <<<<<<<<<<<<<< * device = 'cpu' * xp = numpy */ __pyx_t_9 = __Pyx_Py3ClassCreate(__pyx_t_1, __pyx_n_s_NumpyOps, __pyx_t_2, __pyx_t_7, NULL, 0, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 467, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (PyDict_SetItem(__pyx_d, __pyx_n_s_NumpyOps, __pyx_t_9) < 0) __PYX_ERR(0, 467, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "thinc/neural/ops.pyx":973 * * * class CupyOps(Ops): # <<<<<<<<<<<<<< * device = 'gpu' * xp = cupy */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Ops); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 973, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 973, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_CalculateMetaclass(NULL, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 973, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_7 = __Pyx_Py3MetaclassPrepare(__pyx_t_2, __pyx_t_1, __pyx_n_s_CupyOps, __pyx_n_s_CupyOps, 
(PyObject *) NULL, __pyx_n_s_thinc_neural_ops, (PyObject *) NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 973, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); /* "thinc/neural/ops.pyx":974 * * class CupyOps(Ops): * device = 'gpu' # <<<<<<<<<<<<<< * xp = cupy * */ if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_device, __pyx_n_s_gpu) < 0) __PYX_ERR(0, 974, __pyx_L1_error) /* "thinc/neural/ops.pyx":975 * class CupyOps(Ops): * device = 'gpu' * xp = cupy # <<<<<<<<<<<<<< * * def matmul(self, x, y, out=None): */ __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_cupy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 975, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_xp, __pyx_t_9) < 0) __PYX_ERR(0, 975, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":977 * xp = cupy * * def matmul(self, x, y, out=None): # <<<<<<<<<<<<<< * return self.xp.matmul(x, y, out=out) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_1matmul, 0, __pyx_n_s_CupyOps_matmul, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__91)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 977, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__257); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_matmul, __pyx_t_9) < 0) __PYX_ERR(0, 977, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":980 * return self.xp.matmul(x, y, out=out) * * def gemm(self, x, y, out=None, trans1=False, trans2=False): # <<<<<<<<<<<<<< * if trans1: * x = x.T */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_3gemm, 0, __pyx_n_s_CupyOps_gemm, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__92)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 980, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__259); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_gemm, __pyx_t_9) < 0) __PYX_ERR(0, 980, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":991 * return out * * def asarray(self, X, dtype=None): # <<<<<<<<<<<<<< * if isinstance(X, cupy.ndarray): * return self.xp.asarray(X, dtype=dtype) */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_5asarray, 0, __pyx_n_s_CupyOps_asarray, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__93)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 991, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__261); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_asarray, __pyx_t_9) < 0) __PYX_ERR(0, 991, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1003 * return self.xp.array(X, dtype=dtype) * * def maxout(self, X): # <<<<<<<<<<<<<< * return _custom_kernels.maxout(X) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_7maxout, 0, __pyx_n_s_CupyOps_maxout, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__94)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1003, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_maxout, __pyx_t_9) < 0) __PYX_ERR(0, 1003, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1006 * return _custom_kernels.maxout(X) * * def backprop_maxout(self, dY, which, int P): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_maxout(dY, which, P) * */ __pyx_t_9 = 
__Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_9backprop_maxout, 0, __pyx_n_s_CupyOps_backprop_maxout, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__95)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1006, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_maxout, __pyx_t_9) < 0) __PYX_ERR(0, 1006, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1009 * return _custom_kernels.backprop_maxout(dY, which, P) * * def relu(self, X, inplace=False): # <<<<<<<<<<<<<< * if not inplace: * return X * (X > 0) */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_11relu, 0, __pyx_n_s_CupyOps_relu, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__96)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__265); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_relu, __pyx_t_9) < 0) __PYX_ERR(0, 1009, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1016 * return X * * def backprop_relu(self, delta_, signal_out, inplace=False): # <<<<<<<<<<<<<< * if not inplace: * return delta_ * (signal_out > 0) */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_13backprop_relu, 0, __pyx_n_s_CupyOps_backprop_relu, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__97)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1016, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__267); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_relu, __pyx_t_9) < 0) __PYX_ERR(0, 1016, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1022 * return delta_ * * def selu(self, X, inplace=True): # <<<<<<<<<<<<<< * cdef float scale = 1.0507009873554805 * cdef float alpha = 1.6732632423543772 */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_15selu, 0, __pyx_n_s_CupyOps_selu, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__98)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1022, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__269); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_selu, __pyx_t_9) < 0) __PYX_ERR(0, 1022, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1030 * return out * * def backprop_selu(self, delta, signal_in, # <<<<<<<<<<<<<< * inplace=True): * # Backprop the SELU transformation */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_17backprop_selu, 0, __pyx_n_s_CupyOps_backprop_selu, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__99)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1030, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__271); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_selu, __pyx_t_9) < 0) __PYX_ERR(0, 1030, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1041 * return out * * def mish(self, X, threshold=5, out=None): # <<<<<<<<<<<<<< * return _custom_kernels.mish(X, threshold=threshold, out=out) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_19mish, 0, __pyx_n_s_CupyOps_mish, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__100)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1041, __pyx_L1_error) 
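/* Most CupyOps methods in this block (maxout, mish, seq2col, the pooling ops,
   hash) delegate to _custom_kernels, while relu and selu are expressed directly
   with cupy array operations. The selu registration below uses the constants
   quoted from the source, scale = 1.0507009873554805 and alpha = 1.6732632423543772,
   which correspond to the standard SELU definition. A minimal sketch, assuming a
   NumPy/CuPy-like module `xp`; the helper name `selu_ref` is illustrative only:

       def selu_ref(xp, X, scale=1.0507009873554805, alpha=1.6732632423543772):
           # scale * x for x > 0, scale * alpha * (exp(x) - 1) otherwise
           return scale * xp.where(X > 0, X, alpha * (xp.exp(X) - 1.0))
*/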
__Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__273); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_mish, __pyx_t_9) < 0) __PYX_ERR(0, 1041, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1044 * return _custom_kernels.mish(X, threshold=threshold, out=out) * * def backprop_mish(self, dY, X, threshold=5, out=None): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_mish(dY, X, threshold=threshold, out=out) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_21backprop_mish, 0, __pyx_n_s_CupyOps_backprop_mish, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__101)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__275); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_mish, __pyx_t_9) < 0) __PYX_ERR(0, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1047 * return _custom_kernels.backprop_mish(dY, X, threshold=threshold, out=out) * * def clip_gradient(self, gradient, threshold): # <<<<<<<<<<<<<< * xp = get_array_module(gradient) * grad_norm = xp.linalg.norm(gradient) */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_23clip_gradient, 0, __pyx_n_s_CupyOps_clip_gradient, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__102)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1047, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_clip_gradient, __pyx_t_9) < 0) __PYX_ERR(0, 1047, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1053 * gradient *= threshold / grad_norm * * def seq2col(self, seq, int nW): # <<<<<<<<<<<<<< * '''Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence. 
* The new sequence is constructed by concatenating nW preceding and succeeding */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_25seq2col, 0, __pyx_n_s_CupyOps_seq2col, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__103)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1053, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_seq2col, __pyx_t_9) < 0) __PYX_ERR(0, 1053, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1060 * return _custom_kernels.seq2col(seq, nW) * * def backprop_seq2col(self, dY, int nW): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_seq2col(dY, nW) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_27backprop_seq2col, 0, __pyx_n_s_CupyOps_backprop_seq2col, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__104)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1060, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_seq2col, __pyx_t_9) < 0) __PYX_ERR(0, 1060, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1063 * return _custom_kernels.backprop_seq2col(dY, nW) * * def mean_pool(self, X, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.mean_pool(X, lengths) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_29mean_pool, 0, __pyx_n_s_CupyOps_mean_pool, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__105)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1063, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_mean_pool, __pyx_t_9) < 0) __PYX_ERR(0, 1063, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1066 * return _custom_kernels.mean_pool(X, lengths) * * def backprop_mean_pool(self, d_means, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_mean_pool(d_means, lengths) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_31backprop_mean_pool, 0, __pyx_n_s_CupyOps_backprop_mean_pool, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__106)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_mean_pool, __pyx_t_9) < 0) __PYX_ERR(0, 1066, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1069 * return _custom_kernels.backprop_mean_pool(d_means, lengths) * * def max_pool(self, X, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.max_pool(X, lengths) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_33max_pool, 0, __pyx_n_s_CupyOps_max_pool, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__107)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1069, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_max_pool, __pyx_t_9) < 0) __PYX_ERR(0, 1069, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1072 * return _custom_kernels.max_pool(X, lengths) * * def backprop_max_pool(self, d_maxes, which, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_max_pool(d_maxes, which, lengths) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_35backprop_max_pool, 0, __pyx_n_s_CupyOps_backprop_max_pool, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__108)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1072, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_max_pool, __pyx_t_9) < 0) __PYX_ERR(0, 1072, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1075 * return _custom_kernels.backprop_max_pool(d_maxes, which, lengths) * * def sum_pool(self, X, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.sum_pool(X, lengths) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_37sum_pool, 0, __pyx_n_s_CupyOps_sum_pool, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__109)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1075, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_sum_pool, __pyx_t_9) < 0) __PYX_ERR(0, 1075, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1078 * return _custom_kernels.sum_pool(X, lengths) * * def backprop_sum_pool(self, d_sums, lengths): # <<<<<<<<<<<<<< * return _custom_kernels.backprop_sum_pool(d_sums, lengths) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_39backprop_sum_pool, 0, __pyx_n_s_CupyOps_backprop_sum_pool, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__110)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1078, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_backprop_sum_pool, __pyx_t_9) < 0) __PYX_ERR(0, 1078, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1083 * @cython.boundscheck(False) * @cython.wraparound(False) * def hash(self, ids, uint64_t seed): # <<<<<<<<<<<<<< * return _custom_kernels.hash(ids, seed) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_41hash, 0, __pyx_n_s_CupyOps_hash, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__111)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1083, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_hash, __pyx_t_9) < 0) __PYX_ERR(0, 1083, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1086 * return _custom_kernels.hash(ids, seed) * * def scatter_add(self, out, ids, inputs): # <<<<<<<<<<<<<< * self.xp.scatter_add(out, ids, inputs) * */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_43scatter_add, 0, __pyx_n_s_CupyOps_scatter_add, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__112)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1086, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_scatter_add, __pyx_t_9) < 0) __PYX_ERR(0, 1086, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1089 * self.xp.scatter_add(out, ids, inputs) * * def adam(self, weights, gradient, mom1, mom2, beta1, beta2, eps, # <<<<<<<<<<<<<< * learn_rate, mod_rate=1.): * cupy.ElementwiseKernel( */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_45adam, 0, __pyx_n_s_CupyOps_adam, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__113)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1089, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__288); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_adam, __pyx_t_9) < 0) __PYX_ERR(0, 1089, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1101 * gradient.fill(0) * * def normal_init(self, W, fan_in, inplace=True): # <<<<<<<<<<<<<< * scale = self.xp.sqrt(1. 
/ fan_in) * inits = self.xp.random.normal(scale=scale, size=int(prod(W.shape))) */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_47normal_init, 0, __pyx_n_s_CupyOps_normal_init, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__115)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__290); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_normal_init, __pyx_t_9) < 0) __PYX_ERR(0, 1101, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":1111 * return inits * * def position_encode(self, *args, **kwargs): # <<<<<<<<<<<<<< * positions = NumpyOps().position_encode(*args, **kwargs) * return self.asarray(positions) */ __pyx_t_9 = __Pyx_CyFunction_New(&__pyx_mdef_5thinc_6neural_3ops_7CupyOps_49position_encode, 0, __pyx_n_s_CupyOps_position_encode, NULL, __pyx_n_s_thinc_neural_ops, __pyx_d, ((PyObject *)__pyx_codeobj__116)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1111, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_SetNameInClass(__pyx_t_7, __pyx_n_s_position_encode, __pyx_t_9) < 0) __PYX_ERR(0, 1111, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "thinc/neural/ops.pyx":973 * * * class CupyOps(Ops): # <<<<<<<<<<<<<< * device = 'gpu' * xp = cupy */ __pyx_t_9 = __Pyx_Py3ClassCreate(__pyx_t_2, __pyx_n_s_CupyOps, __pyx_t_1, __pyx_t_7, NULL, 0, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 973, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (PyDict_SetItem(__pyx_d, __pyx_n_s_CupyOps, __pyx_t_9) < 0) __PYX_ERR(0, 973, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":1204 * * * def cpu_clip_gradient(weight_t[::1] gradient, weight_t threshold): # <<<<<<<<<<<<<< * grad_norm = Vec.norm(&gradient[0], gradient.shape[0]) * if grad_norm >= threshold: */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5thinc_6neural_3ops_1cpu_clip_gradient, NULL, __pyx_n_s_thinc_neural_ops); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1204, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_cpu_clip_gradient, __pyx_t_1) < 0) __PYX_ERR(0, 1204, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":1210 * * * def add_gradient_noise(float[::1] gradient, weight_t noise_level, # <<<<<<<<<<<<<< * weight_t timestep): * cdef weight_t variance = noise_level / ((1 + timestep) ** 0.55) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5thinc_6neural_3ops_3add_gradient_noise, NULL, __pyx_n_s_thinc_neural_ops); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1210, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_add_gradient_noise, __pyx_t_1) < 0) __PYX_ERR(0, 1210, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thinc/neural/ops.pyx":1 * # cython: cdivision=True, infer_types=True, profile=True # <<<<<<<<<<<<<< * cimport cython * cimport cython.parallel */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void 
*)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(3, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("") # <<<<<<<<<<<<<< * cdef strided = Enum("") # default * cdef indirect = Enum("") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__294, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("") * cdef strided = Enum("") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__295, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("") * cdef strided = Enum("") # default * cdef indirect = Enum("") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__296, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__297, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("") * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__298, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_10[0] = PyThread_allocate_lock(); __pyx_t_10[1] = PyThread_allocate_lock(); __pyx_t_10[2] = PyThread_allocate_lock(); __pyx_t_10[3] = PyThread_allocate_lock(); __pyx_t_10[4] = PyThread_allocate_lock(); __pyx_t_10[5] = PyThread_allocate_lock(); __pyx_t_10[6] = PyThread_allocate_lock(); __pyx_t_10[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_10, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":549 * info.obj = self * * __pyx_getbuffer = capsule( 
&__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(3, 549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":995 * return self.from_object * * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(3, 995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(3, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ __Pyx_TraceReturn(Py_None, 0); /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init thinc.neural.ops", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init thinc.neural.ops"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* Profile */ #if CYTHON_PROFILE static int __Pyx_TraceSetupAndCall(PyCodeObject** code, PyFrameObject** frame, PyThreadState* tstate, const char *funcname, const char *srcfile, int firstlineno) { PyObject *type, *value, *traceback; int retval; if (*frame == NULL || !CYTHON_PROFILE_REUSE_FRAME) { if (*code == NULL) { *code = __Pyx_createFrameCodeObject(funcname, srcfile, firstlineno); if (*code == NULL) return 0; } *frame = PyFrame_New( tstate, /*PyThreadState *tstate*/ *code, /*PyCodeObject *code*/ __pyx_d, /*PyObject *globals*/ 0 /*PyObject *locals*/ ); if (*frame == NULL) return 0; if (CYTHON_TRACE && (*frame)->f_trace == NULL) { Py_INCREF(Py_None); (*frame)->f_trace = Py_None; } #if PY_VERSION_HEX < 0x030400B1 } else { (*frame)->f_tstate = tstate; #endif } __Pyx_PyFrame_SetLineNumber(*frame, firstlineno); retval = 1; tstate->tracing++; tstate->use_tracing = 0; __Pyx_ErrFetchInState(tstate, &type, &value, &traceback); #if CYTHON_TRACE if (tstate->c_tracefunc) retval = tstate->c_tracefunc(tstate->c_traceobj, *frame, PyTrace_CALL, NULL) == 0; if (retval && tstate->c_profilefunc) #endif retval = tstate->c_profilefunc(tstate->c_profileobj, *frame, PyTrace_CALL, NULL) == 0; tstate->use_tracing = (tstate->c_profilefunc || (CYTHON_TRACE && tstate->c_tracefunc)); tstate->tracing--; if (retval) { __Pyx_ErrRestoreInState(tstate, type, value, traceback); return 
tstate->use_tracing && retval; } else { Py_XDECREF(type); Py_XDECREF(value); Py_XDECREF(traceback); return -1; } } static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const char *srcfile, int firstlineno) { PyCodeObject *py_code = 0; #if PY_MAJOR_VERSION >= 3 py_code = PyCode_NewEmpty(srcfile, funcname, firstlineno); if (likely(py_code)) { py_code->co_flags |= CO_OPTIMIZED | CO_NEWLOCALS; } #else PyObject *py_srcfile = 0; PyObject *py_funcname = 0; py_funcname = PyString_FromString(funcname); if (unlikely(!py_funcname)) goto bad; py_srcfile = PyString_FromString(srcfile); if (unlikely(!py_srcfile)) goto bad; py_code = PyCode_New( 0, 0, 0, CO_OPTIMIZED | CO_NEWLOCALS, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ firstlineno, __pyx_empty_bytes /*PyObject *lnotab*/ ); bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); #endif return py_code; } #endif /* PyObjectSetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_setattro)) return tp->tp_setattro(obj, attr_name, value); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_setattr)) return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); #endif return PyObject_SetAttr(obj, attr_name, value); } #endif /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; 
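/* NOTE (annotation added for readability; not emitted by Cython): this METH_O
 * fast path in __Pyx_PyObject_CallMethO invokes the wrapped PyCFunction
 * pointer directly with a single argument, skipping argument-tuple
 * construction, and repeats CPython's "NULL result without error" sanity
 * check.  The __Pyx_PyObject_CallOneArg wrapper defined just below dispatches
 * here when the callee is a METH_O builtin.  A hedged sketch of how generated
 * module code typically drives these helpers (identifiers illustrative only):
 *
 *     PyObject *res = __Pyx_PyObject_CallOneArg(some_callable, some_arg);
 *     if (unlikely(!res)) goto __pyx_L1_error;
 */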
} #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* SliceObject */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { #if CYTHON_USE_TYPE_SLOTS PyMappingMethods* mp; #if PY_MAJOR_VERSION < 3 PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; if (likely(ms && ms->sq_slice)) { if (!has_cstart) { if (_py_start && (*_py_start != Py_None)) { cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstart = 0; } if (!has_cstop) { if (_py_stop && (*_py_stop != Py_None)) { cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstop = PY_SSIZE_T_MAX; } if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { Py_ssize_t l = ms->sq_length(obj); if (likely(l >= 0)) { if (cstop < 0) { cstop += l; if (cstop < 0) cstop = 0; } if (cstart < 0) { cstart += l; if (cstart < 0) cstart = 0; } } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) goto bad; PyErr_Clear(); } } return ms->sq_slice(obj, cstart, cstop); } #endif mp = Py_TYPE(obj)->tp_as_mapping; if (likely(mp && mp->mp_subscript)) #endif { PyObject* result; PyObject *py_slice, *py_start, *py_stop; if (_py_slice) { py_slice = *_py_slice; } else { PyObject* owned_start = NULL; PyObject* owned_stop = NULL; if (_py_start) { py_start = *_py_start; } else { if (has_cstart) { owned_start = py_start = PyInt_FromSsize_t(cstart); if (unlikely(!py_start)) goto bad; } else py_start = Py_None; } if (_py_stop) { py_stop = *_py_stop; } else { if (has_cstop) { owned_stop = py_stop = PyInt_FromSsize_t(cstop); if (unlikely(!py_stop)) { Py_XDECREF(owned_start); goto bad; } } else py_stop = Py_None; } py_slice = PySlice_New(py_start, py_stop, Py_None); Py_XDECREF(owned_start); Py_XDECREF(owned_stop); if (unlikely(!py_slice)) goto bad; } #if CYTHON_USE_TYPE_SLOTS result = mp->mp_subscript(obj, py_slice); #else result = PyObject_GetItem(obj, py_slice); #endif if (!_py_slice) { Py_DECREF(py_slice); } return result; } PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name); bad: return NULL; } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject 
*runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) { PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname); } /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* FetchCommonType */ static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { PyObject* fake_module; PyTypeObject* cached_type = NULL; fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI); if (!fake_module) return NULL; Py_INCREF(fake_module); cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name); if (cached_type) { if (!PyType_Check((PyObject*)cached_type)) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s is not a type object", type->tp_name); goto bad; } if (cached_type->tp_basicsize != type->tp_basicsize) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s has the wrong size, try recompiling", type->tp_name); goto bad; } } else { if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; PyErr_Clear(); if (PyType_Ready(type) < 0) goto bad; if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0) goto bad; Py_INCREF(type); cached_type = type; } done: Py_DECREF(fake_module); return cached_type; bad: Py_XDECREF(cached_type); cached_type = NULL; goto done; } /* CythonFunctionShared */ #include <structmember.h> static PyObject * __Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure) { if (unlikely(op->func_doc == NULL)) { if (op->func.m_ml->ml_doc) { #if PY_MAJOR_VERSION >= 3 op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc); #else op->func_doc = PyString_FromString(op->func.m_ml->ml_doc); #endif if (unlikely(op->func_doc == NULL)) return NULL; } else { Py_INCREF(Py_None); return Py_None; } } Py_INCREF(op->func_doc); return op->func_doc; } static int __Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp = op->func_doc; if (value == NULL) { value = Py_None; } Py_INCREF(value); op->func_doc = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { if (unlikely(op->func_name == NULL)) { #if PY_MAJOR_VERSION >= 3 op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name); #else op->func_name = PyString_InternFromString(op->func.m_ml->ml_name); #endif if (unlikely(op->func_name == NULL)) return NULL; } Py_INCREF(op->func_name); return op->func_name; } static int __Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) #else if (unlikely(value == NULL || !PyString_Check(value))) #endif { PyErr_SetString(PyExc_TypeError, "__name__ must be set to a string object"); return -1; } tmp = op->func_name; Py_INCREF(value); op->func_name = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { Py_INCREF(op->func_qualname); return op->func_qualname; } static int __Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) #else if (unlikely(value == NULL || !PyString_Check(value))) #endif { PyErr_SetString(PyExc_TypeError, "__qualname__ must be set to a string object"); return -1; } tmp = op->func_qualname; Py_INCREF(value); op->func_qualname = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure) { PyObject *self; self = m->func_closure; if (self ==
NULL) self = Py_None; Py_INCREF(self); return self; } static PyObject * __Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { if (unlikely(op->func_dict == NULL)) { op->func_dict = PyDict_New(); if (unlikely(op->func_dict == NULL)) return NULL; } Py_INCREF(op->func_dict); return op->func_dict; } static int __Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; if (unlikely(value == NULL)) { PyErr_SetString(PyExc_TypeError, "function's dictionary may not be deleted"); return -1; } if (unlikely(!PyDict_Check(value))) { PyErr_SetString(PyExc_TypeError, "setting function's dictionary to a non-dict"); return -1; } tmp = op->func_dict; Py_INCREF(value); op->func_dict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { Py_INCREF(op->func_globals); return op->func_globals; } static PyObject * __Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { Py_INCREF(Py_None); return Py_None; } static PyObject * __Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = (op->func_code) ? op->func_code : Py_None; Py_INCREF(result); return result; } static int __Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { int result = 0; PyObject *res = op->defaults_getter((PyObject *) op); if (unlikely(!res)) return -1; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS op->defaults_tuple = PyTuple_GET_ITEM(res, 0); Py_INCREF(op->defaults_tuple); op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); Py_INCREF(op->defaults_kwdict); #else op->defaults_tuple = PySequence_ITEM(res, 0); if (unlikely(!op->defaults_tuple)) result = -1; else { op->defaults_kwdict = PySequence_ITEM(res, 1); if (unlikely(!op->defaults_kwdict)) result = -1; } #endif Py_DECREF(res); return result; } static int __Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyTuple_Check(value)) { PyErr_SetString(PyExc_TypeError, "__defaults__ must be set to a tuple object"); return -1; } Py_INCREF(value); tmp = op->defaults_tuple; op->defaults_tuple = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = op->defaults_tuple; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = op->defaults_tuple; } else { result = Py_None; } } Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__kwdefaults__ must be set to a dict object"); return -1; } Py_INCREF(value); tmp = op->defaults_kwdict; op->defaults_kwdict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = op->defaults_kwdict; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = op->defaults_kwdict; } else { result = Py_None; } } Py_INCREF(result); return result; } static int 
__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { PyObject* tmp; if (!value || value == Py_None) { value = NULL; } else if (!PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__annotations__ must be set to a dict object"); return -1; } Py_XINCREF(value); tmp = op->func_annotations; op->func_annotations = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = op->func_annotations; if (unlikely(!result)) { result = PyDict_New(); if (unlikely(!result)) return NULL; op->func_annotations = result; } Py_INCREF(result); return result; } static PyGetSetDef __pyx_CyFunction_getsets[] = { {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, {(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0}, {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, {0, 0, 0, 0, 0} }; static PyMemberDef __pyx_CyFunction_members[] = { {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), PY_WRITE_RESTRICTED, 0}, {0, 0, 0, 0, 0} }; static PyObject * __Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromString(m->func.m_ml->ml_name); #else return PyString_FromString(m->func.m_ml->ml_name); #endif } static PyMethodDef __pyx_CyFunction_methods[] = { {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, {0, 0, 0, 0} }; #if PY_VERSION_HEX < 0x030500A0 #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) #else #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist) #endif static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { if (unlikely(op == NULL)) return NULL; op->flags = flags; __Pyx_CyFunction_weakreflist(op) = NULL; op->func.m_ml = ml; op->func.m_self = (PyObject *) op; Py_XINCREF(closure); 
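/* NOTE (annotation added for readability; not emitted by Cython):
 * __Pyx_CyFunction_Init fills in every slot of a freshly allocated
 * __pyx_CyFunctionObject -- closure, module, qualified name, globals, code
 * object, plus initially empty defaults/annotations fields.  The module
 * initialisation code earlier in this file reaches it through
 * __Pyx_CyFunction_New; a hedged sketch of that call shape (identifiers
 * illustrative only):
 *
 *     PyObject *fn = __Pyx_CyFunction_New(&some_mdef, 0, some_qualname,
 *                                         NULL, some_modname, __pyx_d,
 *                                         some_codeobj);
 *     if (unlikely(!fn)) goto __pyx_L1_error;
 */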
op->func_closure = closure; Py_XINCREF(module); op->func.m_module = module; op->func_dict = NULL; op->func_name = NULL; Py_INCREF(qualname); op->func_qualname = qualname; op->func_doc = NULL; op->func_classobj = NULL; op->func_globals = globals; Py_INCREF(op->func_globals); Py_XINCREF(code); op->func_code = code; op->defaults_pyobjects = 0; op->defaults_size = 0; op->defaults = NULL; op->defaults_tuple = NULL; op->defaults_kwdict = NULL; op->defaults_getter = NULL; op->func_annotations = NULL; return (PyObject *) op; } static int __Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) { Py_CLEAR(m->func_closure); Py_CLEAR(m->func.m_module); Py_CLEAR(m->func_dict); Py_CLEAR(m->func_name); Py_CLEAR(m->func_qualname); Py_CLEAR(m->func_doc); Py_CLEAR(m->func_globals); Py_CLEAR(m->func_code); Py_CLEAR(m->func_classobj); Py_CLEAR(m->defaults_tuple); Py_CLEAR(m->defaults_kwdict); Py_CLEAR(m->func_annotations); if (m->defaults) { PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); int i; for (i = 0; i < m->defaults_pyobjects; i++) Py_XDECREF(pydefaults[i]); PyObject_Free(m->defaults); m->defaults = NULL; } return 0; } static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m) { if (__Pyx_CyFunction_weakreflist(m) != NULL) PyObject_ClearWeakRefs((PyObject *) m); __Pyx_CyFunction_clear(m); PyObject_GC_Del(m); } static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) { PyObject_GC_UnTrack(m); __Pyx__CyFunction_dealloc(m); } static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) { Py_VISIT(m->func_closure); Py_VISIT(m->func.m_module); Py_VISIT(m->func_dict); Py_VISIT(m->func_name); Py_VISIT(m->func_qualname); Py_VISIT(m->func_doc); Py_VISIT(m->func_globals); Py_VISIT(m->func_code); Py_VISIT(m->func_classobj); Py_VISIT(m->defaults_tuple); Py_VISIT(m->defaults_kwdict); if (m->defaults) { PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); int i; for (i = 0; i < m->defaults_pyobjects; i++) Py_VISIT(pydefaults[i]); } return 0; } static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) { Py_INCREF(func); return func; } if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) { if (type == NULL) type = (PyObject *)(Py_TYPE(obj)); return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type))); } if (obj == Py_None) obj = NULL; return __Pyx_PyMethod_New(func, obj, type); } static PyObject* __Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromFormat("<cyfunction %U at %p>", op->func_qualname, (void *)op); #else return PyString_FromFormat("<cyfunction %s at %p>", PyString_AsString(op->func_qualname), (void *)op); #endif } static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { PyCFunctionObject* f = (PyCFunctionObject*)func; PyCFunction meth = f->m_ml->ml_meth; Py_ssize_t size; switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { case METH_VARARGS: if (likely(kw == NULL || PyDict_Size(kw) == 0)) return (*meth)(self, arg); break; case METH_VARARGS | METH_KEYWORDS: return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw); case METH_NOARGS: if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = PyTuple_GET_SIZE(arg); if (likely(size == 0)) return (*meth)(self, NULL); PyErr_Format(PyExc_TypeError, "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; case
METH_O: if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = PyTuple_GET_SIZE(arg); if (likely(size == 1)) { PyObject *result, *arg0; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS arg0 = PyTuple_GET_ITEM(arg, 0); #else arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; #endif result = (*meth)(self, arg0); #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) Py_DECREF(arg0); #endif return result; } PyErr_Format(PyExc_TypeError, "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; default: PyErr_SetString(PyExc_SystemError, "Bad call flags in " "__Pyx_CyFunction_Call. METH_OLDARGS is no " "longer supported!"); return NULL; } PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", f->m_ml->ml_name); return NULL; } static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw); } static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { PyObject *result; __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { Py_ssize_t argc; PyObject *new_args; PyObject *self; argc = PyTuple_GET_SIZE(args); new_args = PyTuple_GetSlice(args, 1, argc); if (unlikely(!new_args)) return NULL; self = PyTuple_GetItem(args, 0); if (unlikely(!self)) { Py_DECREF(new_args); return NULL; } result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); Py_DECREF(new_args); } else { result = __Pyx_CyFunction_Call(func, args, kw); } return result; } static PyTypeObject __pyx_CyFunctionType_type = { PyVarObject_HEAD_INIT(0, 0) "cython_function_or_method", sizeof(__pyx_CyFunctionObject), 0, (destructor) __Pyx_CyFunction_dealloc, 0, 0, 0, #if PY_MAJOR_VERSION < 3 0, #else 0, #endif (reprfunc) __Pyx_CyFunction_repr, 0, 0, 0, 0, __Pyx_CyFunction_CallAsMethod, 0, 0, 0, 0, Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, 0, (traverseproc) __Pyx_CyFunction_traverse, (inquiry) __Pyx_CyFunction_clear, 0, #if PY_VERSION_HEX < 0x030500A0 offsetof(__pyx_CyFunctionObject, func_weakreflist), #else offsetof(PyCFunctionObject, m_weakreflist), #endif 0, 0, __pyx_CyFunction_methods, __pyx_CyFunction_members, __pyx_CyFunction_getsets, 0, 0, __Pyx_CyFunction_descr_get, 0, offsetof(__pyx_CyFunctionObject, func_dict), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, #if PY_VERSION_HEX >= 0x030400a1 0, #endif #if PY_VERSION_HEX >= 0x030800b1 0, #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, #endif }; static int __pyx_CyFunction_init(void) { __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); if (unlikely(__pyx_CyFunctionType == NULL)) { return -1; } return 0; } static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults = PyObject_Malloc(size); if (unlikely(!m->defaults)) return PyErr_NoMemory(); memset(m->defaults, 0, size); m->defaults_pyobjects = pyobjects; m->defaults_size = size; return m->defaults; } static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults_tuple = tuple; Py_INCREF(tuple); } static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { 
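/* NOTE (annotation added for readability; not emitted by Cython): the
 * __Pyx_CyFunction_SetDefaultsTuple / SetDefaultsKwDict / SetAnnotationsDict
 * helpers attach default-argument containers to a CyFunction after it has
 * been created.  The CupyOps class setup earlier in this file uses the tuple
 * variant for methods declared with default values, such as adam
 * (mod_rate=1.) and normal_init (inplace=True), e.g. (quoted from the module
 * init code above):
 *
 *     __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_9, __pyx_tuple__288);
 */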
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults_kwdict = dict; Py_INCREF(dict); } static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->func_annotations = dict; Py_INCREF(dict); } /* CythonFunction */ static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { PyObject *op = __Pyx_CyFunction_Init( PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType), ml, flags, qualname, closure, module, globals, code ); if (likely(op)) { PyObject_GC_Track(op); } return op; } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyIntCompare */ static CYTHON_INLINE PyObject* __Pyx_PyInt_NeObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) { if (op1 == op2) { Py_RETURN_FALSE; } #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long a = PyInt_AS_LONG(op1); if (a != b) Py_RETURN_TRUE; else Py_RETURN_FALSE; } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { int unequal; unsigned long uintval; Py_ssize_t size = Py_SIZE(op1); const digit* digits = ((PyLongObject*)op1)->ob_digit; if (intval == 0) { if (size != 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; } else if (intval < 0) { if (size >= 0) Py_RETURN_TRUE; intval = -intval; size = -size; } else { if (size <= 0) Py_RETURN_TRUE; } uintval = 
(unsigned long) intval; #if PyLong_SHIFT * 4 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 4)) { unequal = (size != 5) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 3 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 3)) { unequal = (size != 4) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 2 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 2)) { unequal = (size != 3) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 1 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 1)) { unequal = (size != 2) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK)); if (unequal != 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); if ((double)a != (double)b) Py_RETURN_TRUE; else Py_RETURN_FALSE; } return ( PyObject_RichCompare(op1, op2, Py_NE)); } /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* IterFinish */ static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_FAST_THREAD_STATE PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } /* UnpackItemEndCheck */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a - b); if (likely((x^a) >= 0 || (x^~b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_subtract(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2); } } x = a - b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla - llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("subtract", return NULL) result = ((double)a) - (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
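/* Neither an exact-int nor an exact-float fast path matched above:
   defer to the generic number protocol. */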
PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2); } #endif /* PyIntCompare */ static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) { if (op1 == op2) { Py_RETURN_TRUE; } #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long a = PyInt_AS_LONG(op1); if (a == b) Py_RETURN_TRUE; else Py_RETURN_FALSE; } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { int unequal; unsigned long uintval; Py_ssize_t size = Py_SIZE(op1); const digit* digits = ((PyLongObject*)op1)->ob_digit; if (intval == 0) { if (size == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; } else if (intval < 0) { if (size >= 0) Py_RETURN_FALSE; intval = -intval; size = -size; } else { if (size <= 0) Py_RETURN_FALSE; } uintval = (unsigned long) intval; #if PyLong_SHIFT * 4 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 4)) { unequal = (size != 5) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 3 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 3)) { unequal = (size != 4) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 2 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 2)) { unequal = (size != 3) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 1 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 1)) { unequal = (size != 2) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK)); if (unequal == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); if ((double)a == (double)b) Py_RETURN_TRUE; else Py_RETURN_FALSE; } return ( PyObject_RichCompare(op1, op2, Py_EQ)); } /* PyFloatBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyFloat_SubtractCObj(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { const double a = floatval; double b, result; (void)inplace; (void)zerodivision_check; if (likely(PyFloat_CheckExact(op2))) { b = PyFloat_AS_DOUBLE(op2); } else #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op2))) { b = (double) PyInt_AS_LONG(op2); } else #endif if (likely(PyLong_CheckExact(op2))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)op2)->ob_digit; const Py_ssize_t size = Py_SIZE(op2); switch (size) { case 0: b = 0.0; break; case -1: b = -(double) digits[0]; break; case 1: b = (double) digits[0]; break; case -2: case 2: if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || 
(1 * PyLong_SHIFT < 53))) { b = (double) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -2) b = -b; break; } } CYTHON_FALLTHROUGH; case -3: case 3: if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53))) { b = (double) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -3) b = -b; break; } } CYTHON_FALLTHROUGH; case -4: case 4: if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53))) { b = (double) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (4 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -4) b = -b; break; } } CYTHON_FALLTHROUGH; default: #else { #endif b = PyLong_AsDouble(op2); if (unlikely(b == -1.0 && PyErr_Occurred())) return NULL; } } else { return (inplace ? PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2); } PyFPE_START_PROTECT("subtract", return NULL) result = a - b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } #endif /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, NULL, 0); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) #else if (likely(PyCFunction_Check(func))) #endif { if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* PyFloatBinop */ #if !CYTHON_COMPILING_IN_PYPY #define __Pyx_PyFloat_DivideCObj_ZeroDivisionError(operand) if (unlikely(zerodivision_check && ((operand) == 0))) {\ PyErr_SetString(PyExc_ZeroDivisionError, "float division by zero");\ return NULL;\ } static PyObject* __Pyx_PyFloat_DivideCObj(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { const double a = floatval; double b, result; (void)inplace; (void)zerodivision_check; if (likely(PyFloat_CheckExact(op2))) { b = PyFloat_AS_DOUBLE(op2); __Pyx_PyFloat_DivideCObj_ZeroDivisionError(b) } else #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op2))) { b = (double) PyInt_AS_LONG(op2); __Pyx_PyFloat_DivideCObj_ZeroDivisionError(b) } else #endif if (likely(PyLong_CheckExact(op2))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits 
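/* Read the exact PyLong's internal digits directly: a zero divisor goes
   through the ZeroDivisionError macro, small values are converted to double
   inline, and anything that might not be exactly representable falls back
   to PyLong_AsDouble(). */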
= ((PyLongObject*)op2)->ob_digit; const Py_ssize_t size = Py_SIZE(op2); switch (size) { case 0: __Pyx_PyFloat_DivideCObj_ZeroDivisionError(0) break; case -1: b = -(double) digits[0]; break; case 1: b = (double) digits[0]; break; case -2: case 2: if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (1 * PyLong_SHIFT < 53))) { b = (double) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -2) b = -b; break; } } CYTHON_FALLTHROUGH; case -3: case 3: if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53))) { b = (double) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -3) b = -b; break; } } CYTHON_FALLTHROUGH; case -4: case 4: if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53))) { b = (double) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (4 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -4) b = -b; break; } } CYTHON_FALLTHROUGH; default: #else { #endif b = PyLong_AsDouble(op2); if (unlikely(b == -1.0 && PyErr_Occurred())) return NULL; __Pyx_PyFloat_DivideCObj_ZeroDivisionError(b) } } else { return (inplace ? __Pyx_PyNumber_InPlaceDivide(op1, op2) : __Pyx_PyNumber_Divide(op1, op2)); } __Pyx_PyFloat_DivideCObj_ZeroDivisionError(b) PyFPE_START_PROTECT("divide", return NULL) result = a / b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } #endif /* PyFloatBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyFloat_AddCObj(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { const double a = floatval; double b, result; (void)inplace; (void)zerodivision_check; if (likely(PyFloat_CheckExact(op2))) { b = PyFloat_AS_DOUBLE(op2); } else #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op2))) { b = (double) PyInt_AS_LONG(op2); } else #endif if (likely(PyLong_CheckExact(op2))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)op2)->ob_digit; const Py_ssize_t size = Py_SIZE(op2); switch (size) { case 0: b = 0.0; break; case -1: b = -(double) digits[0]; break; case 1: b = (double) digits[0]; break; case -2: case 2: if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (1 * PyLong_SHIFT < 53))) { b = (double) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -2) b = -b; break; } } CYTHON_FALLTHROUGH; case -3: case 3: if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53))) { b = (double) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -3) b = -b; break; } } CYTHON_FALLTHROUGH; case -4: case 4: if (8 * 
sizeof(unsigned long) > 4 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53))) { b = (double) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (4 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -4) b = -b; break; } } CYTHON_FALLTHROUGH; default: #else { #endif b = PyLong_AsDouble(op2); if (unlikely(b == -1.0 && PyErr_Occurred())) return NULL; } } else { return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } PyFPE_START_PROTECT("add", return NULL) result = a + b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_SubtractCObj(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op2))) { const long a = intval; long x; long b = PyInt_AS_LONG(op2); x = (long)((unsigned long)a - b); if (likely((x^a) >= 0 || (x^~b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_subtract(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op2))) { const long a = intval; long b, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG lla = intval; PY_LONG_LONG llb, llx; #endif const digit* digits = ((PyLongObject*)op2)->ob_digit; const Py_ssize_t size = Py_SIZE(op2); if (likely(__Pyx_sst_abs(size) <= 1)) { b = likely(size) ? digits[0] : 0; if (size == -1) b = -b; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { b = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { llb = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { b = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { llb = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { b = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { llb = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { b = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { llb = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { b = -(long) (((((((((unsigned 
long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { llb = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { b = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { llb = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2); } } x = a - b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla - llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op2)) { const long a = intval; double b = PyFloat_AS_DOUBLE(op2); double result; PyFPE_START_PROTECT("subtract", return NULL) result = ((double)a) - (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2); } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = 
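/* When an exception class is raised with an instance as its value, keep the
   instance and raise it under its own class, provided that class is a
   subclass of the requested type. */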
PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* SliceObject */ static CYTHON_INLINE int __Pyx_PyObject_SetSlice(PyObject* obj, PyObject* value, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { #if CYTHON_USE_TYPE_SLOTS PyMappingMethods* mp; #if PY_MAJOR_VERSION < 3 PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; if (likely(ms && ms->sq_ass_slice)) { if (!has_cstart) { if (_py_start && (*_py_start != Py_None)) { cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstart = 0; } if (!has_cstop) { if (_py_stop && (*_py_stop != Py_None)) { cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstop = PY_SSIZE_T_MAX; } if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { Py_ssize_t l = ms->sq_length(obj); if (likely(l >= 0)) { if (cstop < 0) { cstop += l; if (cstop < 0) cstop = 0; } if (cstart < 0) { cstart += l; if (cstart < 0) cstart = 0; } } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) goto bad; PyErr_Clear(); } } return ms->sq_ass_slice(obj, cstart, cstop, value); } #endif mp = Py_TYPE(obj)->tp_as_mapping; if (likely(mp && mp->mp_ass_subscript)) #endif { int result; PyObject *py_slice, *py_start, *py_stop; if (_py_slice) { py_slice = *_py_slice; } else { PyObject* owned_start = NULL; PyObject* owned_stop = NULL; if (_py_start) { py_start = *_py_start; } else { if (has_cstart) { owned_start = py_start = PyInt_FromSsize_t(cstart); if (unlikely(!py_start)) goto bad; } else py_start = Py_None; } if (_py_stop) { py_stop = *_py_stop; } else { if (has_cstop) { owned_stop = py_stop = PyInt_FromSsize_t(cstop); if (unlikely(!py_stop)) { Py_XDECREF(owned_start); goto bad; 
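/* With start/stop objects prepared (or Py_None), build the slice once and
   apply it through mp_ass_subscript when type slots are available, otherwise
   via PyObject_SetItem/PyObject_DelItem. */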
} } else py_stop = Py_None; } py_slice = PySlice_New(py_start, py_stop, Py_None); Py_XDECREF(owned_start); Py_XDECREF(owned_stop); if (unlikely(!py_slice)) goto bad; } #if CYTHON_USE_TYPE_SLOTS result = mp->mp_ass_subscript(obj, py_slice, value); #else result = value ? PyObject_SetItem(obj, py_slice, value) : PyObject_DelItem(obj, py_slice); #endif if (!_py_slice) { Py_DECREF(py_slice); } return result; } PyErr_Format(PyExc_TypeError, "'%.200s' object does not support slice %.10s", Py_TYPE(obj)->tp_name, value ? "assignment" : "deletion"); bad: return -1; } /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddCObj(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op2))) { const long a = intval; long x; long b = PyInt_AS_LONG(op2); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op2))) { const long a = intval; long b, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG lla = intval; PY_LONG_LONG llb, llx; #endif const digit* digits = ((PyLongObject*)op2)->ob_digit; const Py_ssize_t size = Py_SIZE(op2); if (likely(__Pyx_sst_abs(size) <= 1)) { b = likely(size) ? digits[0] : 0; if (size == -1) b = -b; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { b = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { llb = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { b = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { llb = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { b = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { llb = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { b = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { llb = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { b = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) 
- 1 > 4 * PyLong_SHIFT) { llb = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { b = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { llb = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op2)) { const long a = intval; double b = PyFloat_AS_DOUBLE(op2); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* PyFloatBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyFloat_AddObjC(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { const double b = floatval; double a, result; (void)inplace; (void)zerodivision_check; if (likely(PyFloat_CheckExact(op1))) { a = PyFloat_AS_DOUBLE(op1); } else #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { a = (double) PyInt_AS_LONG(op1); } else #endif if (likely(PyLong_CheckExact(op1))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); switch (size) { case 0: a = 0.0; break; case -1: a = -(double) digits[0]; break; case 1: a = (double) digits[0]; break; case -2: case 2: if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (1 * PyLong_SHIFT < 53))) { a = (double) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -2) a = -a; break; } } CYTHON_FALLTHROUGH; case -3: case 3: if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53))) { a = (double) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -3) a = -a; break; } } CYTHON_FALLTHROUGH; case -4: case 4: if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53))) { a = (double) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (4 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -4) a = -a; break; } } CYTHON_FALLTHROUGH; default: #else { #endif a = PyLong_AsDouble(op1); if (unlikely(a == -1.0 && PyErr_Occurred())) return NULL; } } 
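/* op1 is neither an exact float, int, nor long: fall back to the generic
   number protocol. */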
else { return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } PyFPE_START_PROTECT("add", return NULL) result = a + b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } #endif /* PyFloatBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyFloat_NeObjC(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { const double b = floatval; double a; (void)inplace; (void)zerodivision_check; if (op1 == op2) { Py_RETURN_FALSE; } if (likely(PyFloat_CheckExact(op1))) { a = PyFloat_AS_DOUBLE(op1); } else #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { a = (double) PyInt_AS_LONG(op1); } else #endif if (likely(PyLong_CheckExact(op1))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); switch (size) { case 0: a = 0.0; break; case -1: a = -(double) digits[0]; break; case 1: a = (double) digits[0]; break; case -2: case 2: if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (1 * PyLong_SHIFT < 53))) { a = (double) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -2) a = -a; break; } } CYTHON_FALLTHROUGH; case -3: case 3: if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53))) { a = (double) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -3) a = -a; break; } } CYTHON_FALLTHROUGH; case -4: case 4: if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53))) { a = (double) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (4 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -4) a = -a; break; } } CYTHON_FALLTHROUGH; default: #else { #endif return ( PyFloat_Type.tp_richcompare(op2, op1, Py_NE)); } } else { return ( PyObject_RichCompare(op1, op2, Py_NE)); } if (a != b) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } #endif /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int 
memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (unlikely(memviewslice->memview || memviewslice->data)) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) return; if (unlikely(__pyx_get_slice_count(memview) < 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (unlikely(first_time)) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) { memslice->memview = NULL; return; } if (unlikely(__pyx_get_slice_count(memview) <= 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (unlikely(last_time)) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* BufferIndexError */ static void __Pyx_RaiseBufferIndexError(int axis) { PyErr_Format(PyExc_IndexError, "Out of bounds on buffer access (axis %d)", axis); } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* 
BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case '?': return "'bool'"; case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*);
    default: {
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
    }
  }
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
  }
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as above,
   but we don't have any guarantees. */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
  }
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
  switch (ch) {
    case 'c': return 'H';
    case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I';
    case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U';
    case 'f': case 'd': case 'g': return (is_complex ?
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = 
ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number, ndim; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ndim = ctx->head->field->type->ndim; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; 
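/* 'x' pad bytes advance fmt_offset without consuming a dtype field; the 'Z'
   prefix handled just below marks the following 'f'/'d'/'g' code as a
   complex element and rejects anything else. */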
ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* BufferGetAndValidate */ static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (unlikely(info->buf == NULL)) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static int __Pyx__GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { buf->buf = NULL; if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { __Pyx_ZeroBuffer(buf); return -1; } if (unlikely(buf->ndim != nd)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if (unlikely((size_t)buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_SafeReleaseBuffer(buf); return -1; } /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* PyFloatBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyFloat_SubtractObjC(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { const double b = floatval; double a, result; (void)inplace; (void)zerodivision_check; if (likely(PyFloat_CheckExact(op1))) { a = PyFloat_AS_DOUBLE(op1); } else #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { a = (double) PyInt_AS_LONG(op1); } else #endif if (likely(PyLong_CheckExact(op1))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); switch (size) { case 0: a = 0.0; break; case -1: a = -(double) digits[0]; break; case 1: a = (double) digits[0]; break; case -2: case 2: if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (1 * PyLong_SHIFT < 53))) { a = (double) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -2) a = -a; break; } } CYTHON_FALLTHROUGH; case -3: case 3: if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53))) { a = (double) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { if (size == -3) a = -a; break; } } CYTHON_FALLTHROUGH; case -4: case 4: if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53))) { a = (double) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); if ((8 * sizeof(unsigned long) < 53) || (4 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) 
{ if (size == -4) a = -a; break; } } CYTHON_FALLTHROUGH; default: #else { #endif a = PyLong_AsDouble(op1); if (unlikely(a == -1.0 && PyErr_Occurred())) return NULL; } } else { return (inplace ? PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2); } PyFPE_START_PROTECT("subtract", return NULL) result = a - b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } #endif /* DictGetItem */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { if (unlikely(PyTuple_Check(key))) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) { PyErr_SetObject(PyExc_KeyError, args); Py_DECREF(args); } } else { PyErr_SetObject(PyExc_KeyError, key); } } return NULL; } Py_INCREF(value); return value; } #endif /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback;
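/* The pending exception has just been copied out of tstate->curexc_* into
   local_type/local_value/local_tb; the slots are cleared immediately below,
   the exception is normalized, and it is then published as the "current"
   exception (the sys.exc_info() state that a bare `except:` block observes). */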
tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } if (unlikely(stop <= start)) return PyUnicode_FromUnicode(NULL, 0); length = stop - start; cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? 
r : __Pyx_GetAttr3Default(d); } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) __Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { __Pyx_PyObject_GetAttrStr_ClearAttributeError(); } return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret;
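/* This helper reports whether `meth.__name__` equals `name`; any failure to
   fetch or compare the attribute is swallowed and treated as "not named".
   __Pyx_setup_reduce below uses it to recognize the __reduce_cython__ /
   __setstate_cython__ placeholders before rewiring __reduce__ / __setstate__
   in the type's dict. */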
} static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size) { PyObject *result = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif result = PyObject_GetAttrString(module, class_name); if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject 
*)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if ((size_t)basicsize < size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(result); return NULL; } #endif /* GetVTable */ static void* __Pyx_GetVtable(PyObject *dict) { void* ptr; PyObject *ob = PyObject_GetItem(dict, __pyx_n_s_pyx_vtable); if (!ob) goto bad; #if PY_VERSION_HEX >= 0x02070000 ptr = PyCapsule_GetPointer(ob, 0); #else ptr = PyCObject_AsVoidPtr(ob); #endif if (!ptr && !PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError, "invalid vtable found for imported type"); Py_DECREF(ob); return ptr; bad: Py_XDECREF(ob); return NULL; } /* CalculateMetaclass */ static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) { Py_ssize_t i, nbases = PyTuple_GET_SIZE(bases); for (i=0; i < nbases; i++) { PyTypeObject *tmptype; PyObject *tmp = PyTuple_GET_ITEM(bases, i); tmptype = Py_TYPE(tmp); #if PY_MAJOR_VERSION < 3 if (tmptype == &PyClass_Type) continue; #endif if (!metaclass) { metaclass = tmptype; continue; } if (PyType_IsSubtype(metaclass, tmptype)) continue; if (PyType_IsSubtype(tmptype, metaclass)) { metaclass = tmptype; continue; } PyErr_SetString(PyExc_TypeError, "metaclass conflict: " "the metaclass of a derived class " "must be a (non-strict) subclass " "of the metaclasses of all its bases"); return NULL; } if (!metaclass) { #if PY_MAJOR_VERSION < 3 metaclass = &PyClass_Type; #else metaclass = &PyType_Type; #endif } Py_INCREF((PyObject*) metaclass); return (PyObject*) metaclass; } /* Py3ClassCreate */ static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) { PyObject *ns; if (metaclass) { PyObject *prep = __Pyx_PyObject_GetAttrStr(metaclass, __pyx_n_s_prepare); if (prep) { PyObject *pargs = PyTuple_Pack(2, name, bases); if (unlikely(!pargs)) { Py_DECREF(prep); return NULL; } ns = PyObject_Call(prep, pargs, mkw); Py_DECREF(prep); Py_DECREF(pargs); } else { if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; PyErr_Clear(); ns = PyDict_New(); } } else { ns = PyDict_New(); } if (unlikely(!ns)) return NULL; if (unlikely(PyObject_SetItem(ns, __pyx_n_s_module, modname) < 0)) goto bad; if (unlikely(PyObject_SetItem(ns, __pyx_n_s_qualname, qualname) < 0)) goto bad; if (unlikely(doc && PyObject_SetItem(ns, __pyx_n_s_doc, doc) < 0)) goto bad; return ns; bad: Py_DECREF(ns); return NULL; } static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, 
PyObject *name, PyObject *bases, PyObject *dict, PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass) { PyObject *result, *margs; PyObject *owned_metaclass = NULL; if (allow_py2_metaclass) { owned_metaclass = PyObject_GetItem(dict, __pyx_n_s_metaclass); if (owned_metaclass) { metaclass = owned_metaclass; } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) { PyErr_Clear(); } else { return NULL; } } if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) { metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); Py_XDECREF(owned_metaclass); if (unlikely(!metaclass)) return NULL; owned_metaclass = metaclass; } margs = PyTuple_Pack(3, name, bases, dict); if (unlikely(!margs)) { result = NULL; } else { result = PyObject_Call(metaclass, margs, mkw); Py_DECREF(margs); } Py_XDECREF(owned_metaclass); return result; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; 
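/* First insertion into the code-object cache: the entry array was just
   allocated with room for 64 entries, so the new code object becomes entry 0
   below. Later insertions keep the array sorted by code_line (see the shift
   loop further down) so that __pyx_find_code_object can bisect. */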
__pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto 
raise_overflow;\ }\ }\ return (target_type) value;\ } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (unlikely(buf->strides[dim] != sizeof(void *))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (unlikely(buf->strides[dim] != buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (unlikely(stride < buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (unlikely(buf->suboffsets)) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: 
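/* reached only when one of the contiguity checks above failed; the
   ValueError is already set, so returning 0 tells the caller to propagate it */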
return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (unlikely(buf->ndim != ndim)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; } if (unlikely((unsigned) buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) goto fail; if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) goto fail; } if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, &__Pyx_TypeInfo_float, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_float__const__(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_float__const__, stack, &result, obj); if (unlikely(retcode == -1)) goto 
__pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float__const__(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, &__Pyx_TypeInfo_float__const__, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_nn_uint64_t(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_nn_uint64_t, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint64_t(uint64_t value) { const uint64_t neg_one = (uint64_t) ((uint64_t) 0 - (uint64_t) 1), const_zero = (uint64_t) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(uint64_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(uint64_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint64_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(uint64_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(uint64_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(uint64_t), little, !is_unsigned); } } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice 
__Pyx_PyObject_to_MemoryviewSlice_dc_nn_uint64_t__const__(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_nn_uint64_t__const__, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int__const__(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_int__const__, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_int__const__(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_int__const__, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_float(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_float, stack, &result, obj); if 
(unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_5thinc_8typedefs_weight_t(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_nn___pyx_t_5thinc_8typedefs_weight_t, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_float(const char *itemp) { return (PyObject *) PyFloat_FromDouble(*(float *) itemp); } static CYTHON_INLINE int __pyx_memview_set_float(const char *itemp, PyObject *obj) { float value = __pyx_PyFloat_AsFloat(obj); if ((value == (float)-1) && PyErr_Occurred()) return 0; *(float *) itemp = value; return 1; } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_float__const__(const char *itemp) { return (PyObject *) PyFloat_FromDouble(*(float const *) itemp); } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_nn_uint64_t(const char *itemp) { return (PyObject *) __Pyx_PyInt_From_uint64_t(*(uint64_t *) 
itemp); } static CYTHON_INLINE int __pyx_memview_set_nn_uint64_t(const char *itemp, PyObject *obj) { uint64_t value = __Pyx_PyInt_As_uint64_t(obj); if ((value == ((uint64_t)-1)) && PyErr_Occurred()) return 0; *(uint64_t *) itemp = value; return 1; } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_nn_uint64_t__const__(const char *itemp) { return (PyObject *) __Pyx_PyInt_From_uint64_t(*(uint64_t const *) itemp); } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_int(const char *itemp) { return (PyObject *) __Pyx_PyInt_From_int(*(int *) itemp); } static CYTHON_INLINE int __pyx_memview_set_int(const char *itemp, PyObject *obj) { int value = __Pyx_PyInt_As_int(obj); if ((value == (int)-1) && PyErr_Occurred()) return 0; *(int *) itemp = value; return 1; } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_int__const__(const char *itemp) { return (PyObject *) __Pyx_PyInt_From_int(*(int const *) itemp); } /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabsf(b.real) >= fabsf(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { float r = b.imag / b.real; float s = (float)(1.0) / (b.real + b.imag * r); return __pyx_t_float_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { float r = b.real / b.imag; float s = (float)(1.0) / (b.imag + b.real * r); return __pyx_t_float_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else { float denom = b.real * b.real + b.imag * b.imag; return __pyx_t_float_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_float_complex 
__Pyx_c_neg_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: return __Pyx_c_prod_float(a, a); case 3: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, a); case 4: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = powf(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2f(0.0, -1.0); } } else { r = __Pyx_c_abs_float(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabs(b.real) >= fabs(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { double r = b.imag / b.real; double s = (double)(1.0) / (b.real + b.imag 
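/* __Pyx_c_pow_float above special-cases small integer exponents (0..4, and
   negative integers via 1/a) with repeated multiplication and otherwise
   switches to polar form: for a = r*e^(i*theta),
       a^b = exp(ln(r)*Re(b) - theta*Im(b))
             * (cos(theta*Re(b) + ln(r)*Im(b)) + i*sin(theta*Re(b) + ln(r)*Im(b))).
   __Pyx_c_abs_float prefers hypotf() where available, which avoids the
   overflow of sqrtf(re*re + im*im) for large components.  A stand-alone
   polar-form sketch (illustrative only, kept out of compilation):
*/
#if 0
#include <math.h>
typedef struct { float re, im; } cfloat_example;
static cfloat_example cpow_polar(cfloat_example a, cfloat_example b) {
    /* Assumes a != 0; the generated code handles a == 0 separately. */
    float r     = hypotf(a.re, a.im);
    float theta = atan2f(a.im, a.re);
    float lnr   = logf(r);
    float zr    = expf(lnr * b.re - theta * b.im);
    float zt    = theta * b.re + lnr * b.im;
    cfloat_example z = { zr * cosf(zt), zr * sinf(zt) };
    return z;
}
#endif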
* r); return __pyx_t_double_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { double r = b.real / b.imag; double s = (double)(1.0) / (b.imag + b.real * r); return __pyx_t_double_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else { double denom = b.real * b.real + b.imag * b.imag; return __pyx_t_double_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: return __Pyx_c_prod_double(a, a); case 3: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, a); case 4: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = pow(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2(0.0, -1.0); } } else { r = __Pyx_c_abs_double(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) ((enum NPY_TYPES) 0 - (enum NPY_TYPES) 1), const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), 
little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int32_t(int32_t value) { const int32_t neg_one = (int32_t) ((int32_t) 0 - (int32_t) 1), const_zero = (int32_t) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int32_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int32_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int32_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int32_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int32_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int32_t), little, !is_unsigned); } } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (unlikely(from_mvs->suboffsets[i] >= 0)) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return 
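/* __pyx_memoryview_copy_new_contig above copies an arbitrary (possibly
   strided) memoryview slice into a freshly allocated contiguous buffer: it
   rejects slices with indirect dimensions, builds a shape tuple, allocates a
   __pyx_array in the requested 'c' or 'fortran' mode, wraps it in a new
   memoryview, and delegates the element copy to
   __pyx_memoryview_copy_contents.  On failure it returns a slice whose
   memview and data members are NULL. */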
(int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, 
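/* The CIntFromPy helpers (__Pyx_PyInt_As_int, defined here, and the uint64_t,
   uint32_t, size_t, long, int32_t and char variants below) share one
   template.  With CYTHON_USE_PYLONG_INTERNALS they read the PyLongObject
   digits directly: in the CPython versions this module targets, the magnitude
   is stored as base 2^PyLong_SHIFT digits with the sign carried by
   Py_SIZE(x), so e.g. with PyLong_SHIFT == 30 a two-digit value is rebuilt as
   (digits[1] << 30) | digits[0].  Values that do not fit this four-digit fast
   path fall back to PyLong_AsUnsignedLong()/PyLong_AsLong() and finally to
   _PyLong_AsByteArray(). */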
(((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE uint64_t __Pyx_PyInt_As_uint64_t(PyObject *x) { const uint64_t neg_one = (uint64_t) ((uint64_t) 0 - (uint64_t) 1), const_zero = (uint64_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(uint64_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(uint64_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (uint64_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (uint64_t) 0; case 1: __PYX_VERIFY_RETURN_INT(uint64_t, digit, digits[0]) case 2: if (8 * sizeof(uint64_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, 
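/* Conversion failures in these helpers are reported as ((type)-1) with a
   Python exception set, so callers must pair the sentinel with
   PyErr_Occurred(), as __pyx_memview_set_int above does.  The final
   _PyLong_AsByteArray() fallback reuses the run-time endianness probe of the
   CIntToPy helpers; on PyPy builds that lack this private API the template
   raises RuntimeError instead of converting large numbers. */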
unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) >= 2 * PyLong_SHIFT) { return (uint64_t) (((((uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0])); } } break; case 3: if (8 * sizeof(uint64_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) >= 3 * PyLong_SHIFT) { return (uint64_t) (((((((uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0])); } } break; case 4: if (8 * sizeof(uint64_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) >= 4 * PyLong_SHIFT) { return (uint64_t) (((((((((uint64_t)digits[3]) << PyLong_SHIFT) | (uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (uint64_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(uint64_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(uint64_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint64_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint64_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (uint64_t) 0; case -1: __PYX_VERIFY_RETURN_INT(uint64_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(uint64_t, digit, +digits[0]) case -2: if (8 * sizeof(uint64_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) - 1 > 2 * PyLong_SHIFT) { return (uint64_t) (((uint64_t)-1)*(((((uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]))); } } break; case 2: if (8 * sizeof(uint64_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) - 1 > 2 * PyLong_SHIFT) { return (uint64_t) ((((((uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]))); } } break; case -3: if (8 * sizeof(uint64_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) - 1 > 3 * PyLong_SHIFT) { return (uint64_t) (((uint64_t)-1)*(((((((uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]))); } } break; case 3: if (8 * sizeof(uint64_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) 
{ __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) - 1 > 3 * PyLong_SHIFT) { return (uint64_t) ((((((((uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]))); } } break; case -4: if (8 * sizeof(uint64_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) - 1 > 4 * PyLong_SHIFT) { return (uint64_t) (((uint64_t)-1)*(((((((((uint64_t)digits[3]) << PyLong_SHIFT) | (uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]))); } } break; case 4: if (8 * sizeof(uint64_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint64_t) - 1 > 4 * PyLong_SHIFT) { return (uint64_t) ((((((((((uint64_t)digits[3]) << PyLong_SHIFT) | (uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]))); } } break; } #endif if (sizeof(uint64_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(uint64_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint64_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint64_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else uint64_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (uint64_t) -1; } } else { uint64_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (uint64_t) -1; val = __Pyx_PyInt_As_uint64_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to uint64_t"); return (uint64_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to uint64_t"); return (uint64_t) -1; } /* CIntFromPy */ static CYTHON_INLINE uint32_t __Pyx_PyInt_As_uint32_t(PyObject *x) { const uint32_t neg_one = (uint32_t) ((uint32_t) 0 - (uint32_t) 1), const_zero = (uint32_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(uint32_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(uint32_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (uint32_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { 
case 0: return (uint32_t) 0; case 1: __PYX_VERIFY_RETURN_INT(uint32_t, digit, digits[0]) case 2: if (8 * sizeof(uint32_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) >= 2 * PyLong_SHIFT) { return (uint32_t) (((((uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0])); } } break; case 3: if (8 * sizeof(uint32_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) >= 3 * PyLong_SHIFT) { return (uint32_t) (((((((uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0])); } } break; case 4: if (8 * sizeof(uint32_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) >= 4 * PyLong_SHIFT) { return (uint32_t) (((((((((uint32_t)digits[3]) << PyLong_SHIFT) | (uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (uint32_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(uint32_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (uint32_t) 0; case -1: __PYX_VERIFY_RETURN_INT(uint32_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(uint32_t, digit, +digits[0]) case -2: if (8 * sizeof(uint32_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 2 * PyLong_SHIFT) { return (uint32_t) (((uint32_t)-1)*(((((uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case 2: if (8 * sizeof(uint32_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 2 * PyLong_SHIFT) { return (uint32_t) ((((((uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case -3: if (8 * sizeof(uint32_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 3 * PyLong_SHIFT) { return (uint32_t) 
(((uint32_t)-1)*(((((((uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case 3: if (8 * sizeof(uint32_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 3 * PyLong_SHIFT) { return (uint32_t) ((((((((uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case -4: if (8 * sizeof(uint32_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 4 * PyLong_SHIFT) { return (uint32_t) (((uint32_t)-1)*(((((((((uint32_t)digits[3]) << PyLong_SHIFT) | (uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; case 4: if (8 * sizeof(uint32_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(uint32_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(uint32_t) - 1 > 4 * PyLong_SHIFT) { return (uint32_t) ((((((((((uint32_t)digits[3]) << PyLong_SHIFT) | (uint32_t)digits[2]) << PyLong_SHIFT) | (uint32_t)digits[1]) << PyLong_SHIFT) | (uint32_t)digits[0]))); } } break; } #endif if (sizeof(uint32_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(uint32_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(uint32_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else uint32_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (uint32_t) -1; } } else { uint32_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (uint32_t) -1; val = __Pyx_PyInt_As_uint32_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to uint32_t"); return (uint32_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to uint32_t"); return (uint32_t) -1; } /* CIntFromPy */ static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *x) { const size_t neg_one = (size_t) ((size_t) 0 - (size_t) 1), const_zero = (size_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(size_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(size_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { 
goto raise_neg_overflow; } return (size_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (size_t) 0; case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, digits[0]) case 2: if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) >= 2 * PyLong_SHIFT) { return (size_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } } break; case 3: if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) >= 3 * PyLong_SHIFT) { return (size_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } } break; case 4: if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) >= 4 * PyLong_SHIFT) { return (size_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (size_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(size_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(size_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (size_t) 0; case -1: __PYX_VERIFY_RETURN_INT(size_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, +digits[0]) case -2: if (8 * sizeof(size_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { return (size_t) (((size_t)-1)*(((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case 2: if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { return (size_t) ((((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case -3: if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { return (size_t) (((size_t)-1)*(((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case 3: if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { return (size_t) ((((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case -4: if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) { return (size_t) (((size_t)-1)*(((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case 4: if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) { return (size_t) ((((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; } #endif if (sizeof(size_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(size_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(size_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(size_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else size_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (size_t) -1; } } else { size_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (size_t) -1; val = __Pyx_PyInt_As_size_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to size_t"); return (size_t) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && 
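/* Degenerate comparisons such as "sizeof(long) < sizeof(long)" in
   __Pyx_PyInt_As_long just above (and in __Pyx_PyInt_From_long earlier) are
   not bugs: every converter is instantiated from the same template per C
   type, and the redundant branches are constant-folded away by the C
   compiler. */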
unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 
3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE int32_t __Pyx_PyInt_As_int32_t(PyObject *x) { const int32_t neg_one = (int32_t) ((int32_t) 0 - (int32_t) 1), const_zero = (int32_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int32_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int32_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int32_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if 
CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int32_t) 0; case 1: __PYX_VERIFY_RETURN_INT(int32_t, digit, digits[0]) case 2: if (8 * sizeof(int32_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int32_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int32_t) >= 2 * PyLong_SHIFT) { return (int32_t) (((((int32_t)digits[1]) << PyLong_SHIFT) | (int32_t)digits[0])); } } break; case 3: if (8 * sizeof(int32_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int32_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int32_t) >= 3 * PyLong_SHIFT) { return (int32_t) (((((((int32_t)digits[2]) << PyLong_SHIFT) | (int32_t)digits[1]) << PyLong_SHIFT) | (int32_t)digits[0])); } } break; case 4: if (8 * sizeof(int32_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int32_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int32_t) >= 4 * PyLong_SHIFT) { return (int32_t) (((((((((int32_t)digits[3]) << PyLong_SHIFT) | (int32_t)digits[2]) << PyLong_SHIFT) | (int32_t)digits[1]) << PyLong_SHIFT) | (int32_t)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int32_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int32_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int32_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int32_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int32_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int32_t) 0; case -1: __PYX_VERIFY_RETURN_INT(int32_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int32_t, digit, +digits[0]) case -2: if (8 * sizeof(int32_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int32_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int32_t) - 1 > 2 * PyLong_SHIFT) { return (int32_t) (((int32_t)-1)*(((((int32_t)digits[1]) << PyLong_SHIFT) | (int32_t)digits[0]))); } } break; case 2: if (8 * sizeof(int32_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int32_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int32_t) - 1 > 2 * PyLong_SHIFT) { return (int32_t) ((((((int32_t)digits[1]) << PyLong_SHIFT) | (int32_t)digits[0]))); } } break; case -3: if (8 * sizeof(int32_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int32_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int32_t) - 
1 > 3 * PyLong_SHIFT) { return (int32_t) (((int32_t)-1)*(((((((int32_t)digits[2]) << PyLong_SHIFT) | (int32_t)digits[1]) << PyLong_SHIFT) | (int32_t)digits[0]))); } } break; case 3: if (8 * sizeof(int32_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int32_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int32_t) - 1 > 3 * PyLong_SHIFT) { return (int32_t) ((((((((int32_t)digits[2]) << PyLong_SHIFT) | (int32_t)digits[1]) << PyLong_SHIFT) | (int32_t)digits[0]))); } } break; case -4: if (8 * sizeof(int32_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int32_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int32_t) - 1 > 4 * PyLong_SHIFT) { return (int32_t) (((int32_t)-1)*(((((((((int32_t)digits[3]) << PyLong_SHIFT) | (int32_t)digits[2]) << PyLong_SHIFT) | (int32_t)digits[1]) << PyLong_SHIFT) | (int32_t)digits[0]))); } } break; case 4: if (8 * sizeof(int32_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int32_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int32_t) - 1 > 4 * PyLong_SHIFT) { return (int32_t) ((((((((((int32_t)digits[3]) << PyLong_SHIFT) | (int32_t)digits[2]) << PyLong_SHIFT) | (int32_t)digits[1]) << PyLong_SHIFT) | (int32_t)digits[0]))); } } break; } #endif if (sizeof(int32_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int32_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int32_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int32_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int32_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int32_t) -1; } } else { int32_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int32_t) -1; val = __Pyx_PyInt_As_int32_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int32_t"); return (int32_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int32_t"); return (int32_t) -1; } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto 
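/* __Pyx_PyInt_As_char, which begins just above, has to cope with the fact
   that whether plain char is signed is implementation-defined: the template
   probes it at compile time via neg_one = (char)((char)0 - (char)1), and if
   that wraps to a positive value the type is treated as unsigned, so negative
   Python ints are rejected with OverflowError.  A stand-alone probe
   (illustrative only, kept out of compilation):
*/
#if 0
#include <stdio.h>
int main(void) {
    char neg_one = (char)((char)0 - (char)1);
    printf("plain char is %s\n", neg_one > 0 ? "unsigned" : "signed");
    return 0;
}
#endif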
raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { 
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* FunctionExport */ static int 
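/* __Pyx_check_binary_version above formats the compile-time
   PY_MAJOR_VERSION.PY_MINOR_VERSION into a 4-byte buffer and compares it
   against the prefix of Py_GetVersion(), so effectively only the major
   version digit and the first digit of the minor version are checked; a
   mismatch emits a warning via PyErr_WarnEx() rather than refusing to load
   the module. */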
__Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig) { PyObject *d = 0; PyObject *cobj = 0; union { void (*fp)(void); void *p; } tmp; d = PyObject_GetAttrString(__pyx_m, (char *)"__pyx_capi__"); if (!d) { PyErr_Clear(); d = PyDict_New(); if (!d) goto bad; Py_INCREF(d); if (PyModule_AddObject(__pyx_m, (char *)"__pyx_capi__", d) < 0) goto bad; } tmp.fp = f; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(tmp.p, sig, 0); #else cobj = PyCObject_FromVoidPtrAndDesc(tmp.p, (void *)sig, 0); #endif if (!cobj) goto bad; if (PyDict_SetItemString(d, name, cobj) < 0) goto bad; Py_DECREF(cobj); Py_DECREF(d); return 0; bad: Py_XDECREF(cobj); Py_XDECREF(d); return -1; } /* FunctionImport */ #ifndef __PYX_HAVE_RT_ImportFunction #define __PYX_HAVE_RT_ImportFunction static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { PyObject *d = 0; PyObject *cobj = 0; union { void (*fp)(void); void *p; } tmp; d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); if (!d) goto bad; cobj = PyDict_GetItemString(d, funcname); if (!cobj) { PyErr_Format(PyExc_ImportError, "%.200s does not export expected C function %.200s", PyModule_GetName(module), funcname); goto bad; } #if PY_VERSION_HEX >= 0x02070000 if (!PyCapsule_IsValid(cobj, sig)) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); goto bad; } tmp.p = PyCapsule_GetPointer(cobj, sig); #else {const char *desc, *s1, *s2; desc = (const char *)PyCObject_GetDesc(cobj); if (!desc) goto bad; s1 = desc; s2 = sig; while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } if (*s1 != *s2) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, desc); goto bad; } tmp.p = PyCObject_AsVoidPtr(cobj);} #endif *f = tmp.fp; if (!(*f)) goto bad; Py_DECREF(d); return 0; bad: Py_XDECREF(d); return -1; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned 
char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */