Generated by Cython 0.29.21

Yellow lines hint at Python interaction.
Click on a line that starts with a "+" to see the C code that Cython generated for it.

Raw output: simulation.c

+001: cimport numpy as np
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+002: import numpy as np
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 2, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 003: import cython
 004: from libc.string cimport memcmp
 005: from libc.math cimport log
 006: from libc.stdlib cimport abort, malloc, free
+007: from scipy.ndimage import gaussian_gradient_magnitude as ggm, gaussian_filter as gf
  __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_n_s_gaussian_gradient_magnitude);
  __Pyx_GIVEREF(__pyx_n_s_gaussian_gradient_magnitude);
  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_gaussian_gradient_magnitude);
  __Pyx_INCREF(__pyx_n_s_gaussian_filter);
  __Pyx_GIVEREF(__pyx_n_s_gaussian_filter);
  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_gaussian_filter);
  __pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_ndimage, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_gaussian_gradient_magnitude); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_ggm, __pyx_t_1) < 0) __PYX_ERR(0, 7, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_gaussian_filter); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_gf, __pyx_t_1) < 0) __PYX_ERR(0, 7, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 008: 
 009: # NumPy must be initialized. When using NumPy from C or Cython you must
 010: # *ALWAYS* do that, or you will get segfaults
+011: np.import_array()
  __pyx_t_3 = __pyx_f_5numpy_import_array(); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 11, __pyx_L1_error)
 012: 
+013: def next_fast_len(np.npy_intp target, str backend='numpy'):
/* Python wrapper */
static PyObject *__pyx_pw_6pyrost_3bin_10simulation_1next_fast_len(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6pyrost_3bin_10simulation_next_fast_len[] = "next_fast_len(npy_intp target, unicode backend=u'numpy')\nFind the next fast size of input data to fft, for zero-padding, etc.\n    FFT algorithms gain their speed by a recursive divide and conquer strategy.\n    This relies on efficient functions for small prime factors of the input length.\n    Thus, the transforms are fastest when using composites of the prime factors handled\n    by the fft implementation. If there are efficient functions for all radices <= n,\n    then the result will be a number x >= target with only prime factors < n. (Also\n    known as n-smooth numbers)\n\n    Parameters\n    ----------\n    target : int\n        Length to start searching from. Must be a positive integer.\n    backend : {'fftw', 'numpy'}, optional\n        Find n-smooth number for the FFT implementation from the specified\n        library.\n\n    Returns\n    -------\n    n : int\n        The smallest fast length greater than or equal to `target`.\n    ";
static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_1next_fast_len = {"next_fast_len", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_1next_fast_len, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_next_fast_len};
static PyObject *__pyx_pw_6pyrost_3bin_10simulation_1next_fast_len(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  npy_intp __pyx_v_target;
  PyObject *__pyx_v_backend = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("next_fast_len (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_target,&__pyx_n_s_backend,0};
    PyObject* values[2] = {0,0};
    values[1] = ((PyObject*)((PyObject*)__pyx_n_u_numpy));
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_target)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_backend);
          if (value) { values[1] = value; kw_args--; }
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "next_fast_len") < 0)) __PYX_ERR(0, 13, __pyx_L3_error)
      }
    } else {
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_target = __Pyx_PyInt_As_Py_intptr_t(values[0]); if (unlikely((__pyx_v_target == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13, __pyx_L3_error)
    __pyx_v_backend = ((PyObject*)values[1]);
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("next_fast_len", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 13, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("pyrost.bin.simulation.next_fast_len", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_backend), (&PyUnicode_Type), 1, "backend", 1))) __PYX_ERR(0, 13, __pyx_L1_error)
  __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_next_fast_len(__pyx_self, __pyx_v_target, __pyx_v_backend);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_6pyrost_3bin_10simulation_next_fast_len(CYTHON_UNUSED PyObject *__pyx_self, npy_intp __pyx_v_target, PyObject *__pyx_v_backend) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("next_fast_len", 0);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("pyrost.bin.simulation.next_fast_len", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__10 = PyTuple_Pack(2, __pyx_n_s_target, __pyx_n_s_backend); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 13, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__10);
  __Pyx_GIVEREF(__pyx_tuple__10);
  __pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_next_fast_len, 13, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) __PYX_ERR(0, 13, __pyx_L1_error)
/* … */
  __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_1next_fast_len, 0, __pyx_n_s_next_fast_len, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__11)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_tuple__12);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_next_fast_len, __pyx_t_2) < 0) __PYX_ERR(0, 13, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_tuple__12 = PyTuple_Pack(1, ((PyObject*)__pyx_n_u_numpy)); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(0, 13, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__12);
  __Pyx_GIVEREF(__pyx_tuple__12);
 014:     r"""Find the next fast size of input data to fft, for zero-padding, etc.
 015:     FFT algorithms gain their speed by a recursive divide and conquer strategy.
 016:     This relies on efficient functions for small prime factors of the input length.
 017:     Thus, the transforms are fastest when using composites of the prime factors handled
 018:     by the fft implementation. If there are efficient functions for all radices <= n,
 019:     then the result will be a number x >= target with only prime factors < n. (Also
 020:     known as n-smooth numbers)
 021: 
 022:     Parameters
 023:     ----------
 024:     target : int
 025:         Length to start searching from. Must be a positive integer.
 026:     backend : {'fftw', 'numpy'}, optional
 027:         Find n-smooth number for the FFT implementation from the specified
 028:         library.
 029: 
 030:     Returns
 031:     -------
 032:     n : int
 033:         The smallest fast length greater than or equal to `target`.
 034:     """
+035:     if target < 0:
  __pyx_t_1 = ((__pyx_v_target < 0) != 0);
  if (unlikely(__pyx_t_1)) {
/* … */
  }
+036:         raise ValueError('Target length must be positive')
    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 36, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 36, __pyx_L1_error)
/* … */
  __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_Target_length_must_be_positive); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 36, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple_);
  __Pyx_GIVEREF(__pyx_tuple_);
+037:     if backend == 'fftw':
  __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_fftw, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 37, __pyx_L1_error)
  __pyx_t_3 = (__pyx_t_1 != 0);
  if (__pyx_t_3) {
/* … */
  }
+038:         return next_fast_len_fftw(target)
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = __Pyx_PyInt_From_unsigned_long(next_fast_len_fftw(__pyx_v_target)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 38, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;
+039:     elif backend == 'numpy':
  __pyx_t_3 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_numpy, Py_EQ)); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 39, __pyx_L1_error)
  __pyx_t_1 = (__pyx_t_3 != 0);
  if (likely(__pyx_t_1)) {
/* … */
  }
+040:         return good_size(target)
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = __Pyx_PyInt_From_unsigned_long(good_size(__pyx_v_target)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 40, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;
 041:     else:
+042:         raise ValueError('{:s} is invalid backend'.format(backend))
  /*else*/ {
    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_s_is_invalid_backend, __pyx_n_s_format); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 42, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = NULL;
    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
      if (likely(__pyx_t_5)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
        __Pyx_INCREF(__pyx_t_5);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_4, function);
      }
    }
    __pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_v_backend) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_backend);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 42, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_Raise(__pyx_t_4, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __PYX_ERR(0, 42, __pyx_L1_error)
  }
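
A rough illustration of the n-smooth search described in the docstring above, as a hypothetical pure-Python sketch restricted to the radices {2, 3, 5}; the real function defers to the C helpers next_fast_len_fftw and good_size, whose radix sets may differ.

def _next_smooth(target, radices=(2, 3, 5)):
    # Smallest n >= target whose prime factors all belong to `radices`.
    n = max(int(target), 1)
    while True:
        m = n
        for p in radices:
            while m % p == 0:
                m //= p
        if m == 1:
            return n
        n += 1

# e.g. _next_smooth(97) == 100, since 100 = 2**2 * 5**2; next_fast_len(97)
# similarly pads a length-97 transform up to a fast composite size.
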
 043: 
 044: # Helper functions
+045: cdef bint fft_faster(np.npy_intp *in1, np.npy_intp *in2, np.npy_intp ndim):
static int __pyx_f_6pyrost_3bin_10simulation_fft_faster(npy_intp *__pyx_v_in1, npy_intp *__pyx_v_in2, npy_intp __pyx_v_ndim) {
  npy_intp __pyx_v_in1_size;
  npy_intp __pyx_v_in2_size;
  int __pyx_v_i;
  npy_intp __pyx_v_direct_ops;
  npy_intp __pyx_v_fft_ops;
  double __pyx_v_O_fft;
  double __pyx_v_O_direct;
  double __pyx_v_O_offset;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("fft_faster", 0);
/* … */
  /* function exit code */
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
 046:     # Copied from scipy; see scipy/signal/signaltools.py:_fftconv_faster
+047:     cdef np.npy_intp in1_size = 1
  __pyx_v_in1_size = 1;
+048:     cdef np.npy_intp in2_size = 1
  __pyx_v_in2_size = 1;
 049:     cdef int i
+050:     for i in range(ndim):
  __pyx_t_1 = __pyx_v_ndim;
  __pyx_t_2 = __pyx_t_1;
  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
    __pyx_v_i = __pyx_t_3;
+051:         in1_size *= in1[i]
    __pyx_v_in1_size = (__pyx_v_in1_size * (__pyx_v_in1[__pyx_v_i]));
+052:         in2_size *= in2[i]
    __pyx_v_in2_size = (__pyx_v_in2_size * (__pyx_v_in2[__pyx_v_i]));
  }
 053: 
+054:     cdef np.npy_intp direct_ops = 1
  __pyx_v_direct_ops = 1;
+055:     if ndim == 1:
  __pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
  if (__pyx_t_4) {
/* … */
    goto __pyx_L5;
  }
+056:         direct_ops *= in1[0] * in2[0] if in1[0] < in2[0] else in1[0] * in2[0] - (in2[0] // 2) * ((in2[0] + 1) // 2)
    if ((((__pyx_v_in1[0]) < (__pyx_v_in2[0])) != 0)) {
      __pyx_t_5 = ((__pyx_v_in1[0]) * (__pyx_v_in2[0]));
    } else {
      __pyx_t_5 = (((__pyx_v_in1[0]) * (__pyx_v_in2[0])) - (((__pyx_v_in2[0]) / 2) * (((__pyx_v_in2[0]) + 1) / 2)));
    }
    __pyx_v_direct_ops = (__pyx_v_direct_ops * __pyx_t_5);
 057:     else:
+058:         direct_ops *= in1_size * in2_size
  /*else*/ {
    __pyx_v_direct_ops = (__pyx_v_direct_ops * (__pyx_v_in1_size * __pyx_v_in2_size));
  }
  __pyx_L5:;
 059: 
+060:     cdef np.npy_intp fft_ops = 1
  __pyx_v_fft_ops = 1;
+061:     for i in range(ndim):
  __pyx_t_1 = __pyx_v_ndim;
  __pyx_t_2 = __pyx_t_1;
  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
    __pyx_v_i = __pyx_t_3;
+062:         fft_ops *= in1[i] + in2[i] - 1
    __pyx_v_fft_ops = (__pyx_v_fft_ops * (((__pyx_v_in1[__pyx_v_i]) + (__pyx_v_in2[__pyx_v_i])) - 1));
  }
+063:     fft_ops = <np.npy_intp>(3 * fft_ops * log(<double>fft_ops))  # 3 separate FFTs of size full_out_shape
  __pyx_v_fft_ops = ((npy_intp)((3 * __pyx_v_fft_ops) * log(((double)__pyx_v_fft_ops))));
 064: 
 065:     cdef double offset, O_fft, O_direct, O_offset
+066:     if ndim == 1:
  __pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
  if (__pyx_t_4) {
/* … */
    goto __pyx_L8;
  }
+067:         if in2_size <= in1_size:
    __pyx_t_4 = ((__pyx_v_in2_size <= __pyx_v_in1_size) != 0);
    if (__pyx_t_4) {
/* … */
      goto __pyx_L9;
    }
+068:             O_fft = 3.2646654e-9
      __pyx_v_O_fft = 3.2646654e-9;
+069:             O_direct = 2.8478277e-10
      __pyx_v_O_direct = 2.8478277e-10;
+070:             O_offset = -1e-3
      __pyx_v_O_offset = -1e-3;
 071:         else:
+072:             O_fft = 3.21635404e-9
    /*else*/ {
      __pyx_v_O_fft = 3.21635404e-9;
+073:             O_direct = 1.1773253e-8
      __pyx_v_O_direct = 1.1773253e-8;
+074:             O_offset = -1e-5
      __pyx_v_O_offset = -1e-5;
    }
    __pyx_L9:;
 075:     else:
+076:         O_fft = 2.04735e-9
  /*else*/ {
    __pyx_v_O_fft = 2.04735e-9;
+077:         O_direct = 1.55367e-8
    __pyx_v_O_direct = 1.55367e-8;
+078:         O_offset = -1e-4
    __pyx_v_O_offset = -1e-4;
  }
  __pyx_L8:;
+079:     return (O_fft * fft_ops) < (O_direct * direct_ops + O_offset)
  __pyx_r = ((__pyx_v_O_fft * __pyx_v_fft_ops) < ((__pyx_v_O_direct * __pyx_v_direct_ops) + __pyx_v_O_offset));
  goto __pyx_L0;
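
For reference, the 1-D branch of the heuristic above can be written in plain Python as follows. This sketch uses the constants from the listing for the in2_size <= in1_size case; the compiled version also covers the swapped 1-D case and the N-D case.

from math import log

def fft_faster_1d(n1, n2):
    # Estimated cost of direct convolution versus three FFTs of the full
    # output length n1 + n2 - 1, scaled by empirical constants.
    direct_ops = n1 * n2 if n1 < n2 else n1 * n2 - (n2 // 2) * ((n2 + 1) // 2)
    full = n1 + n2 - 1
    fft_ops = 3 * full * log(full)
    O_fft, O_direct, O_offset = 3.2646654e-9, 2.8478277e-10, -1e-3  # n2 <= n1 case
    return O_fft * fft_ops < O_direct * direct_ops + O_offset
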
 080: 
+081: cdef int extend_mode_to_code(str mode) except -1:
static int __pyx_f_6pyrost_3bin_10simulation_extend_mode_to_code(PyObject *__pyx_v_mode) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("extend_mode_to_code", 0);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("pyrost.bin.simulation.extend_mode_to_code", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
+082:     if mode == 'constant':
  __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_mode, __pyx_n_u_constant, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 82, __pyx_L1_error)
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
/* … */
  }
+083:         return EXTEND_CONSTANT
    __pyx_r = __pyx_e_6pyrost_3bin_10simulation_EXTEND_CONSTANT;
    goto __pyx_L0;
+084:     elif mode == 'nearest':
  __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_mode, __pyx_n_u_nearest, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 84, __pyx_L1_error)
  __pyx_t_1 = (__pyx_t_2 != 0);
  if (__pyx_t_1) {
/* … */
  }
+085:         return EXTEND_NEAREST
    __pyx_r = __pyx_e_6pyrost_3bin_10simulation_EXTEND_NEAREST;
    goto __pyx_L0;
+086:     elif mode == 'mirror':
  __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_mode, __pyx_n_u_mirror, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 86, __pyx_L1_error)
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
/* … */
  }
+087:         return EXTEND_MIRROR
    __pyx_r = __pyx_e_6pyrost_3bin_10simulation_EXTEND_MIRROR;
    goto __pyx_L0;
+088:     elif mode == 'reflect':
  __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_mode, __pyx_n_u_reflect, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 88, __pyx_L1_error)
  __pyx_t_1 = (__pyx_t_2 != 0);
  if (__pyx_t_1) {
/* … */
  }
+089:         return EXTEND_REFLECT
    __pyx_r = __pyx_e_6pyrost_3bin_10simulation_EXTEND_REFLECT;
    goto __pyx_L0;
+090:     elif mode == 'wrap':
  __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_mode, __pyx_n_u_wrap, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 90, __pyx_L1_error)
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (likely(__pyx_t_2)) {
/* … */
  }
+091:         return EXTEND_WRAP
    __pyx_r = __pyx_e_6pyrost_3bin_10simulation_EXTEND_WRAP;
    goto __pyx_L0;
 092:     else:
+093:         raise RuntimeError('boundary mode not supported')
  /*else*/ {
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 93, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(0, 93, __pyx_L1_error)
  }
/* … */
  __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_boundary_mode_not_supported); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 93, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__2);
  __Pyx_GIVEREF(__pyx_tuple__2);
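
The chain of comparisons above amounts to a small lookup table. A Python-level sketch (the integer codes here are placeholders; the real values come from the EXTEND_* enum defined in the C extension):

_MODE_CODES = {'constant': 0, 'nearest': 1, 'mirror': 2, 'reflect': 3, 'wrap': 4}  # placeholder codes

def extend_mode_to_code_py(mode):
    try:
        return _MODE_CODES[mode]
    except KeyError:
        raise RuntimeError('boundary mode not supported') from None
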
 094: 
+095: cdef np.ndarray number_to_array(object num, np.npy_intp rank, int type_num):
static PyArrayObject *__pyx_f_6pyrost_3bin_10simulation_number_to_array(PyObject *__pyx_v_num, npy_intp __pyx_v_rank, int __pyx_v_type_num) {
  npy_intp *__pyx_v_dims;
  PyArrayObject *__pyx_v_arr = 0;
  int __pyx_v_i;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("number_to_array", 0);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("pyrost.bin.simulation.number_to_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
+096:     cdef np.npy_intp *dims = [rank,]
  __pyx_t_1[0] = __pyx_v_rank;
  __pyx_v_dims = __pyx_t_1;
+097:     cdef np.ndarray arr = <np.ndarray>np.PyArray_SimpleNew(1, dims, type_num)
  __pyx_t_2 = PyArray_SimpleNew(1, __pyx_v_dims, __pyx_v_type_num); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 97, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __pyx_t_2;
  __Pyx_INCREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_v_arr = ((PyArrayObject *)__pyx_t_3);
  __pyx_t_3 = 0;
 098:     cdef int i
+099:     for i in range(rank):
  __pyx_t_4 = __pyx_v_rank;
  __pyx_t_5 = __pyx_t_4;
  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;
+100:         arr[i] = num
    if (unlikely(__Pyx_SetItemInt(((PyObject *)__pyx_v_arr), __pyx_v_i, __pyx_v_num, int, 1, __Pyx_PyInt_From_int, 0, 0, 0) < 0)) __PYX_ERR(0, 100, __pyx_L1_error)
  }
+101:     return arr
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_arr));
  __pyx_r = __pyx_v_arr;
  goto __pyx_L0;
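
At the Python level, number_to_array behaves roughly like np.full, with a NumPy dtype standing in for the raw type_num (a sketch, not the compiled path):

import numpy as np

def number_to_array_py(num, rank, dtype=np.float64):
    # 1-D array of length `rank` with every element set to `num`.
    return np.full(rank, num, dtype=dtype)
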
 102: 
+103: cdef np.ndarray normalize_sequence(object inp, np.npy_intp rank, int type_num):
static PyArrayObject *__pyx_f_6pyrost_3bin_10simulation_normalize_sequence(PyObject *__pyx_v_inp, npy_intp __pyx_v_rank, int __pyx_v_type_num) {
  PyArrayObject *__pyx_v_arr = 0;
  int __pyx_v_tn;
  npy_intp __pyx_v_size;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("normalize_sequence", 0);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("pyrost.bin.simulation.normalize_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_arr);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
 104:     # If the input is a scalar, create a sequence of length `rank` by
 105:     # duplicating the input. If the input is a sequence, check that its
 106:     # length is equal to the rank.
 107:     cdef np.ndarray arr
 108:     cdef int tn
+109:     if np.PyArray_IsAnyScalar(inp):
  __pyx_t_1 = (PyArray_IsAnyScalar(__pyx_v_inp) != 0);
  if (__pyx_t_1) {
/* … */
    goto __pyx_L3;
  }
+110:         arr = number_to_array(inp, rank, type_num)
    __pyx_t_2 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_number_to_array(__pyx_v_inp, __pyx_v_rank, __pyx_v_type_num)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 110, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_v_arr = ((PyArrayObject *)__pyx_t_2);
    __pyx_t_2 = 0;
+111:     elif np.PyArray_Check(inp):
  __pyx_t_1 = (PyArray_Check(__pyx_v_inp) != 0);
  if (__pyx_t_1) {
/* … */
    goto __pyx_L3;
  }
+112:         arr = <np.ndarray>inp
    __pyx_t_2 = __pyx_v_inp;
    __Pyx_INCREF(__pyx_t_2);
    __pyx_v_arr = ((PyArrayObject *)__pyx_t_2);
    __pyx_t_2 = 0;
+113:         tn = np.PyArray_TYPE(arr)
    __pyx_v_tn = PyArray_TYPE(__pyx_v_arr);
+114:         if tn != type_num:
    __pyx_t_1 = ((__pyx_v_tn != __pyx_v_type_num) != 0);
    if (__pyx_t_1) {
/* … */
    }
+115:             arr = <np.ndarray>np.PyArray_Cast(arr, type_num)
      __pyx_t_2 = PyArray_Cast(__pyx_v_arr, __pyx_v_type_num); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 115, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
      __pyx_t_3 = __pyx_t_2;
      __Pyx_INCREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF_SET(__pyx_v_arr, ((PyArrayObject *)__pyx_t_3));
      __pyx_t_3 = 0;
+116:     elif isinstance(inp, (list, tuple)):
  __pyx_t_4 = PyList_Check(__pyx_v_inp); 
  __pyx_t_5 = (__pyx_t_4 != 0);
  if (!__pyx_t_5) {
  } else {
    __pyx_t_1 = __pyx_t_5;
    goto __pyx_L5_bool_binop_done;
  }
  __pyx_t_5 = PyTuple_Check(__pyx_v_inp); 
  __pyx_t_4 = (__pyx_t_5 != 0);
  __pyx_t_1 = __pyx_t_4;
  __pyx_L5_bool_binop_done:;
  __pyx_t_4 = (__pyx_t_1 != 0);
  if (likely(__pyx_t_4)) {
/* … */
    goto __pyx_L3;
  }
+117:         arr = <np.ndarray>np.PyArray_FROM_OTF(inp, type_num, np.NPY_ARRAY_C_CONTIGUOUS)
    __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_inp, __pyx_v_type_num, NPY_ARRAY_C_CONTIGUOUS); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 117, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __pyx_t_3;
    __Pyx_INCREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_arr = ((PyArrayObject *)__pyx_t_2);
    __pyx_t_2 = 0;
 118:     else:
+119:         raise ValueError("Wrong sequence argument type")
  /*else*/ {
    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 119, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 119, __pyx_L1_error)
  }
  __pyx_L3:;
/* … */
  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Wrong_sequence_argument_type); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 119, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__3);
  __Pyx_GIVEREF(__pyx_tuple__3);
+120:     cdef np.npy_intp size = np.PyArray_SIZE(arr)
  __pyx_v_size = PyArray_SIZE(__pyx_v_arr);
+121:     if size != rank:
  __pyx_t_4 = ((__pyx_v_size != __pyx_v_rank) != 0);
  if (unlikely(__pyx_t_4)) {
/* … */
  }
+122:         raise ValueError("Sequence argument must have length equal to input rank")
    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 122, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(0, 122, __pyx_L1_error)
/* … */
  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Sequence_argument_must_have_leng); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 122, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__4);
  __Pyx_GIVEREF(__pyx_tuple__4);
+123:     return arr
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_arr));
  __pyx_r = __pyx_v_arr;
  goto __pyx_L0;
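
A NumPy-level sketch of the same normalization logic, again with a dtype standing in for type_num; np.isscalar and np.ascontiguousarray are assumptions of this sketch rather than what the compiled code calls:

import numpy as np

def normalize_sequence_py(inp, rank, dtype=np.float64):
    # Scalars are broadcast to `rank` elements; sequences are converted to a
    # contiguous array and checked against the expected length.
    if np.isscalar(inp):
        return np.full(rank, inp, dtype=dtype)
    arr = np.ascontiguousarray(inp, dtype=dtype)
    if arr.size != rank:
        raise ValueError("Sequence argument must have length equal to input rank")
    return arr
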
 124: 
+125: def gaussian_kernel(sigma: double, order: cython.uint=0, truncate: cython.double=4.) -> np.ndarray:
/* Python wrapper */
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_3gaussian_kernel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6pyrost_3bin_10simulation_2gaussian_kernel[] = "gaussian_kernel(double sigma: double, unsigned int order: cython.uint = 0, double truncate: cython.double = 4.) -> np.ndarray\nDiscrete Gaussian kernel.\n    \n    Parameters\n    ----------\n    sigma : float\n        Standard deviation for Gaussian kernel.\n    order : int, optional\n        The order of the filter. An order of 0 corresponds to convolution with a\n        Gaussian kernel. A positive order corresponds to convolution with that\n        derivative of a Gaussian. Default is 0.\n    truncate : float, optional\n        Truncate the filter at this many standard deviations. Default is 4.0.\n    \n    Returns\n    -------\n    krn : np.ndarray\n        Gaussian kernel.\n    ";
static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_3gaussian_kernel = {"gaussian_kernel", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_3gaussian_kernel, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_2gaussian_kernel};
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_3gaussian_kernel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  double __pyx_v_sigma;
  unsigned int __pyx_v_order;
  double __pyx_v_truncate;
  PyArrayObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gaussian_kernel (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sigma,&__pyx_n_s_order,&__pyx_n_s_truncate,0};
    PyObject* values[3] = {0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_order);
          if (value) { values[1] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_truncate);
          if (value) { values[2] = value; kw_args--; }
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gaussian_kernel") < 0)) __PYX_ERR(0, 125, __pyx_L3_error)
      }
    } else {
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_sigma = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_sigma == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 125, __pyx_L3_error)
    if (values[1]) {
      __pyx_v_order = __Pyx_PyInt_As_unsigned_int(values[1]); if (unlikely((__pyx_v_order == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 125, __pyx_L3_error)
    } else {
      __pyx_v_order = ((unsigned int)((unsigned int)0));
    }
    if (values[2]) {
      __pyx_v_truncate = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_truncate == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 125, __pyx_L3_error)
    } else {
      __pyx_v_truncate = ((double)((double)4.));
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gaussian_kernel", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 125, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("pyrost.bin.simulation.gaussian_kernel", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_2gaussian_kernel(__pyx_self, __pyx_v_sigma, __pyx_v_order, __pyx_v_truncate);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_2gaussian_kernel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_sigma, unsigned int __pyx_v_order, double __pyx_v_truncate) {
  npy_intp __pyx_v_radius;
  npy_intp *__pyx_v_dims;
  PyArrayObject *__pyx_v_out = 0;
  double *__pyx_v__out;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gaussian_kernel", 0);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("pyrost.bin.simulation.gaussian_kernel", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_out);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__13 = PyTuple_Pack(7, __pyx_n_s_sigma, __pyx_n_s_order, __pyx_n_s_truncate, __pyx_n_s_radius, __pyx_n_s_dims, __pyx_n_s_out, __pyx_n_s_out_2); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__13);
  __Pyx_GIVEREF(__pyx_tuple__13);
/* … */
  __pyx_t_2 = __Pyx_PyInt_From_unsigned_int(((unsigned int)0)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = PyFloat_FromDouble(((double)4.)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
  __pyx_t_2 = 0;
  __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyDict_NewPresized(4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_sigma, __pyx_n_u_double) < 0) __PYX_ERR(0, 125, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_order, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 125, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_truncate, __pyx_n_u_double) < 0) __PYX_ERR(0, 125, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 125, __pyx_L1_error)
  __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_3gaussian_kernel, 0, __pyx_n_s_gaussian_kernel, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__14)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 125, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_t_4);
  __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_2, __pyx_t_1);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_gaussian_kernel, __pyx_t_2) < 0) __PYX_ERR(0, 125, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(3, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_gaussian_kernel, 125, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 125, __pyx_L1_error)
 126:     """Discrete Gaussian kernel.
 127:     
 128:     Parameters
 129:     ----------
 130:     sigma : float
 131:         Standard deviation for Gaussian kernel.
 132:     order : int, optional
 133:         The order of the filter. An order of 0 corresponds to convolution with a
 134:         Gaussian kernel. A positive order corresponds to convolution with that
 135:         derivative of a Gaussian. Default is 0.
 136:     truncate : float, optional
 137:         Truncate the filter at this many standard deviations. Default is 4.0.
 138:     
 139:     Returns
 140:     -------
 141:     krn : np.ndarray
 142:         Gaussian kernel.
 143:     """
+144:     cdef np.npy_intp radius = <np.npy_intp>(sigma * truncate)
  __pyx_v_radius = ((npy_intp)(__pyx_v_sigma * __pyx_v_truncate));
+145:     cdef np.npy_intp *dims = [2 * radius + 1,]
  __pyx_t_1[0] = ((2 * __pyx_v_radius) + 1);
  __pyx_v_dims = __pyx_t_1;
+146:     cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(1, dims, np.NPY_FLOAT64)
  __pyx_t_2 = PyArray_SimpleNew(1, __pyx_v_dims, NPY_FLOAT64); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 146, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __pyx_t_2;
  __Pyx_INCREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_v_out = ((PyArrayObject *)__pyx_t_3);
  __pyx_t_3 = 0;
+147:     cdef double *_out = <double *>np.PyArray_DATA(out)
  __pyx_v__out = ((double *)PyArray_DATA(__pyx_v_out));
+148:     with nogil:
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {
/* … */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L5:;
      }
  }
+149:         gauss_kernel1d(_out, sigma, order, dims[0])
        gauss_kernel1d(__pyx_v__out, __pyx_v_sigma, __pyx_v_order, (__pyx_v_dims[0]));
      }
+150:     return out
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_out));
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;
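
For the order-0 case, the kernel documented above is a truncated, discretized Gaussian; a NumPy sketch under the assumption of unit-sum normalization (the derivative orders handled by the compiled gauss_kernel1d are omitted here):

import numpy as np

def gaussian_kernel_py(sigma, truncate=4.0):
    radius = int(sigma * truncate)        # same truncation as radius above
    x = np.arange(-radius, radius + 1)
    krn = np.exp(-0.5 * (x / sigma) ** 2)
    return krn / krn.sum()                # assumed unit-sum normalization
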
 151: 
+152: cdef np.ndarray gf_fft(np.ndarray inp, np.ndarray sigma, np.ndarray order, str mode,
static PyArrayObject *__pyx_f_6pyrost_3bin_10simulation_gf_fft(PyArrayObject *__pyx_v_inp, PyArrayObject *__pyx_v_sigma, PyArrayObject *__pyx_v_order, PyObject *__pyx_v_mode, double __pyx_v_cval, double __pyx_v_truncate, PyObject *__pyx_v_backend, unsigned int __pyx_v_num_threads) {
  int __pyx_v_ndim;
  npy_intp *__pyx_v_dims;
  PyArrayObject *__pyx_v_out = 0;
  double *__pyx_v__out;
  double *__pyx_v__inp;
  unsigned long *__pyx_v__dims;
  double *__pyx_v__sig;
  unsigned int *__pyx_v__ord;
  int __pyx_v__mode;
  int __pyx_v_fail;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gf_fft", 0);
  __Pyx_INCREF((PyObject *)__pyx_v_inp);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("pyrost.bin.simulation.gf_fft", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_out);
  __Pyx_XDECREF((PyObject *)__pyx_v_inp);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
 153:                        double cval, double truncate, str backend, unsigned int num_threads):
+154:     inp = np.PyArray_GETCONTIGUOUS(inp)
  __pyx_t_1 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_inp)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 154, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF_SET(__pyx_v_inp, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+155:     inp = np.PyArray_Cast(inp, np.NPY_FLOAT64)
  __pyx_t_1 = PyArray_Cast(__pyx_v_inp, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 155, __pyx_L1_error)
  __Pyx_DECREF_SET(__pyx_v_inp, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
 156: 
+157:     cdef int ndim = inp.ndim
  __pyx_t_2 = __pyx_v_inp->nd;
  __pyx_v_ndim = __pyx_t_2;
+158:     cdef np.npy_intp *dims = inp.shape
  __pyx_t_3 = __pyx_v_inp->dimensions;
  __pyx_v_dims = __pyx_t_3;
+159:     cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, np.NPY_FLOAT64)
  __pyx_t_1 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 159, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_4 = __pyx_t_1;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_v_out = ((PyArrayObject *)__pyx_t_4);
  __pyx_t_4 = 0;
+160:     cdef double *_out = <double *>np.PyArray_DATA(out)
  __pyx_v__out = ((double *)PyArray_DATA(__pyx_v_out));
+161:     cdef double *_inp = <double *>np.PyArray_DATA(inp)
  __pyx_v__inp = ((double *)PyArray_DATA(__pyx_v_inp));
+162:     cdef unsigned long *_dims = <unsigned long *>dims
  __pyx_v__dims = ((unsigned long *)__pyx_v_dims);
+163:     cdef double *_sig = <double *>np.PyArray_DATA(sigma)
  __pyx_v__sig = ((double *)PyArray_DATA(__pyx_v_sigma));
+164:     cdef unsigned *_ord = <unsigned *>np.PyArray_DATA(order)
  __pyx_v__ord = ((unsigned int *)PyArray_DATA(__pyx_v_order));
+165:     cdef int _mode = extend_mode_to_code(mode)
  __pyx_t_2 = __pyx_f_6pyrost_3bin_10simulation_extend_mode_to_code(__pyx_v_mode); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 165, __pyx_L1_error)
  __pyx_v__mode = __pyx_t_2;
+166:     with nogil:
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {
/* … */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }
+167:         if backend == 'fftw':
        __pyx_t_5 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_fftw, Py_EQ)); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 167, __pyx_L4_error)
        __pyx_t_6 = (__pyx_t_5 != 0);
        if (__pyx_t_6) {
/* … */
          goto __pyx_L6;
        }
+168:             gauss_filter_fftw(_out, _inp, ndim, _dims, _sig, _ord, _mode, cval, truncate, num_threads)
          gauss_filter_fftw(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v__sig, __pyx_v__ord, __pyx_v__mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_num_threads);
+169:         elif backend == 'numpy':
        __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_numpy, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 169, __pyx_L4_error)
        __pyx_t_5 = (__pyx_t_6 != 0);
        if (__pyx_t_5) {
/* … */
          goto __pyx_L6;
        }
+170:             fail = gauss_filter_np(_out, _inp, ndim, _dims, _sig, _ord, _mode, cval, truncate, num_threads)
          __pyx_v_fail = gauss_filter_np(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v__sig, __pyx_v__ord, __pyx_v__mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_num_threads);
+171:             if fail:
          __pyx_t_5 = (__pyx_v_fail != 0);
          if (__pyx_t_5) {
/* … */
          }
+172:                 raise RuntimeError('NumPy FFT exited with error')
            {
                #ifdef WITH_THREAD
                PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
                #endif
                /*try:*/ {
                  __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 172, __pyx_L9_error)
                  __Pyx_GOTREF(__pyx_t_4);
                  __Pyx_Raise(__pyx_t_4, 0, 0, 0);
                  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
                  __PYX_ERR(0, 172, __pyx_L9_error)
                }
                /*finally:*/ {
                  __pyx_L9_error: {
                    #ifdef WITH_THREAD
                    __Pyx_PyGILState_Release(__pyx_gilstate_save);
                    #endif
                    goto __pyx_L4_error;
                  }
                }
            }
/* … */
  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_NumPy_FFT_exited_with_error); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 172, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__5);
  __Pyx_GIVEREF(__pyx_tuple__5);
 173:         else:
+174:             raise ValueError('{:s} is invalid backend'.format(backend))
        /*else*/ {
          {
              #ifdef WITH_THREAD
              PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
              #endif
              /*try:*/ {
                __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_s_is_invalid_backend, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 174, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_1);
                __pyx_t_7 = NULL;
                if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
                  __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_1);
                  if (likely(__pyx_t_7)) {
                    PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
                    __Pyx_INCREF(__pyx_t_7);
                    __Pyx_INCREF(function);
                    __Pyx_DECREF_SET(__pyx_t_1, function);
                  }
                }
                __pyx_t_4 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_7, __pyx_v_backend) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_backend);
                __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
                if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 174, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_4);
                __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 174, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_1);
                __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
                __Pyx_Raise(__pyx_t_1, 0, 0, 0);
                __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                __PYX_ERR(0, 174, __pyx_L12_error)
              }
              /*finally:*/ {
                __pyx_L12_error: {
                  #ifdef WITH_THREAD
                  __Pyx_PyGILState_Release(__pyx_gilstate_save);
                  #endif
                  goto __pyx_L4_error;
                }
              }
          }
        }
        __pyx_L6:;
      }
+175:     return out
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_out));
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;
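
As a usage-level cross-check, the numpy-backend path above should behave much like a direct scipy.ndimage.gaussian_filter call (imported at the top of this module as gf), up to the error introduced by the FFT-based convolution; treating the two as comparable is an assumption of this sketch.

import numpy as np
from scipy.ndimage import gaussian_filter

inp = np.random.rand(64, 64)
ref = gaussian_filter(inp, sigma=2.0, order=0, mode='reflect', cval=0.0, truncate=4.0)
# The module-level gaussian_filter defined below (which presumably dispatches
# to gf_fft) should return values close to `ref` for the same parameters.
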
 176: 
+177: def gaussian_filter(inp: np.ndarray, sigma: object, order: object=0, mode: str='reflect',
/* Python wrapper */
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_5gaussian_filter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6pyrost_3bin_10simulation_4gaussian_filter[] = "gaussian_filter(ndarray inp: np.ndarray, sigma: object, order: object = 0, unicode mode: str = u'reflect', double cval: cython.double = 0., double truncate: cython.double = 4., unicode backend: str = u'numpy', unsigned int num_threads: cython.uint = 1) -> np.ndarray\nMultidimensional Gaussian filter. The multidimensional filter is implemented as\n    a sequence of 1-D FFT convolutions.\n\n    Parameters\n    ----------\n    inp : np.ndarray\n        The input array.\n    sigma : float or list of floats\n        Standard deviation for Gaussian kernel. The standard deviations of the Gaussian\n        filter are given for each axis as a sequence, or as a single number, in which case\n        it is equal for all axes.\n    order : int or list of ints, optional\n        The order of the filter along each axis is given as a sequence of integers, or as\n        a single number. An order of 0 corresponds to convolution with a Gaussian kernel.\n        A positive order corresponds to convolution with that derivative of a Gaussian.\n    mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional\n        The mode parameter determines how the input array is extended when the filter\n        overlaps a border. Default value is 'reflect'. The valid values and their behavior\n        is as follows:\n\n        * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all\n          values beyond the edge with the same constant value, defined by the `cval`\n          parameter.\n        * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating\n          the last pixel.\n        * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting\n          about the center of the last pixel. This mode is also sometimes referred to as\n          whole-sample symmetric.\n        * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting\n          about the edge of the last pixel. This mode is al""so sometimes referred to as\n          half-sample symmetric.\n        * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around\n          to the opposite edge.\n    cval : float, optional\n        Value to fill past edges of input if mode is \342\200\230constant\342\200\231. Default is 0.0.\n    truncate : float, optional\n        Truncate the filter at this many standard deviations. Default is 4.0.\n    backend : {'fftw', 'numpy'}, optional\n        Choose backend library for the FFT implementation.\n    num_threads : int, optional\n        Number of threads.\n    \n    Returns\n    -------\n    out : np.ndarray\n        Returned array of same shape as `input`.\n    ";
static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_5gaussian_filter = {"gaussian_filter", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_5gaussian_filter, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_4gaussian_filter};
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_5gaussian_filter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_inp = 0;
  PyObject *__pyx_v_sigma = 0;
  PyObject *__pyx_v_order = 0;
  PyObject *__pyx_v_mode = 0;
  double __pyx_v_cval;
  double __pyx_v_truncate;
  PyObject *__pyx_v_backend = 0;
  unsigned int __pyx_v_num_threads;
  PyArrayObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gaussian_filter (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_inp,&__pyx_n_s_sigma,&__pyx_n_s_order,&__pyx_n_s_mode,&__pyx_n_s_cval,&__pyx_n_s_truncate,&__pyx_n_s_backend,&__pyx_n_s_num_threads,0};
    PyObject* values[8] = {0,0,0,0,0,0,0,0};
    values[2] = ((PyObject *)((PyObject *)__pyx_int_0));
    values[3] = ((PyObject*)((PyObject*)__pyx_n_u_reflect));
    values[6] = ((PyObject*)((PyObject*)__pyx_n_u_numpy));
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
        CYTHON_FALLTHROUGH;
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        CYTHON_FALLTHROUGH;
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inp)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("gaussian_filter", 0, 2, 8, 1); __PYX_ERR(0, 177, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_order);
          if (value) { values[2] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
          if (value) { values[3] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cval);
          if (value) { values[4] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  5:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_truncate);
          if (value) { values[5] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  6:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_backend);
          if (value) { values[6] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  7:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads);
          if (value) { values[7] = value; kw_args--; }
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gaussian_filter") < 0)) __PYX_ERR(0, 177, __pyx_L3_error)
      }
    } else {
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case  8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
        CYTHON_FALLTHROUGH;
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        CYTHON_FALLTHROUGH;
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_inp = ((PyArrayObject *)values[0]);
    __pyx_v_sigma = values[1];
    __pyx_v_order = values[2];
    __pyx_v_mode = ((PyObject*)values[3]);
    if (values[4]) {
      __pyx_v_cval = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_cval == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 178, __pyx_L3_error)
    } else {
      __pyx_v_cval = ((double)((double)0.));
    }
    if (values[5]) {
      __pyx_v_truncate = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_truncate == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 178, __pyx_L3_error)
    } else {
      __pyx_v_truncate = ((double)((double)4.));
    }
    __pyx_v_backend = ((PyObject*)values[6]);
    if (values[7]) {
      __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[7]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 179, __pyx_L3_error)
    } else {
      __pyx_v_num_threads = ((unsigned int)((unsigned int)1));
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gaussian_filter", 0, 2, 8, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 177, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("pyrost.bin.simulation.gaussian_filter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_inp), __pyx_ptype_5numpy_ndarray, 1, "inp", 0))) __PYX_ERR(0, 177, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mode), (&PyUnicode_Type), 1, "mode", 1))) __PYX_ERR(0, 177, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_backend), (&PyUnicode_Type), 1, "backend", 1))) __PYX_ERR(0, 178, __pyx_L1_error)
  __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_4gaussian_filter(__pyx_self, __pyx_v_inp, __pyx_v_sigma, __pyx_v_order, __pyx_v_mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_backend, __pyx_v_num_threads);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_4gaussian_filter(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_inp, PyObject *__pyx_v_sigma, PyObject *__pyx_v_order, PyObject *__pyx_v_mode, double __pyx_v_cval, double __pyx_v_truncate, PyObject *__pyx_v_backend, unsigned int __pyx_v_num_threads) {
  int __pyx_v_ndim;
  PyArrayObject *__pyx_v_sigmas = 0;
  PyArrayObject *__pyx_v_orders = 0;
  int __pyx_v_i;
  CYTHON_UNUSED npy_intp *__pyx_v_dims;
  npy_intp *__pyx_v_kdims;
  int __pyx_v_if_fft;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gaussian_filter", 0);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("pyrost.bin.simulation.gaussian_filter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_sigmas);
  __Pyx_XDECREF((PyObject *)__pyx_v_orders);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__15 = PyTuple_Pack(15, __pyx_n_s_inp, __pyx_n_s_sigma, __pyx_n_s_order, __pyx_n_s_mode, __pyx_n_s_cval, __pyx_n_s_truncate, __pyx_n_s_backend, __pyx_n_s_num_threads, __pyx_n_s_ndim, __pyx_n_s_sigmas, __pyx_n_s_orders, __pyx_n_s_i, __pyx_n_s_dims, __pyx_n_s_kdims, __pyx_n_s_if_fft); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 177, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__15);
  __Pyx_GIVEREF(__pyx_tuple__15);
/* … */
  __pyx_t_5 = PyTuple_New(6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 177, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_INCREF(((PyObject *)__pyx_int_0));
  __Pyx_GIVEREF(((PyObject *)__pyx_int_0));
  PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_int_0));
  __Pyx_INCREF(((PyObject*)__pyx_n_u_reflect));
  __Pyx_GIVEREF(((PyObject*)__pyx_n_u_reflect));
  PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject*)__pyx_n_u_reflect));
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_1);
  __Pyx_INCREF(((PyObject*)__pyx_n_u_numpy));
  __Pyx_GIVEREF(((PyObject*)__pyx_n_u_numpy));
  PyTuple_SET_ITEM(__pyx_t_5, 4, ((PyObject*)__pyx_n_u_numpy));
  __Pyx_GIVEREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_5, 5, __pyx_t_4);
  __pyx_t_2 = 0;
  __pyx_t_1 = 0;
  __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyDict_NewPresized(9); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 177, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_inp, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 177, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_sigma, __pyx_n_u_object) < 0) __PYX_ERR(0, 177, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_order, __pyx_n_u_object) < 0) __PYX_ERR(0, 177, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_mode, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 177, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_cval, __pyx_n_u_double) < 0) __PYX_ERR(0, 177, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_truncate, __pyx_n_u_double) < 0) __PYX_ERR(0, 177, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_backend, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 177, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 177, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 177, __pyx_L1_error)
  __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_5gaussian_filter, 0, __pyx_n_s_gaussian_filter, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__16)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 177, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_t_5);
  __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_1, __pyx_t_4);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_gaussian_filter, __pyx_t_1) < 0) __PYX_ERR(0, 177, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(8, 0, 15, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_gaussian_filter, 177, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) __PYX_ERR(0, 177, __pyx_L1_error)
+178:                     cval: cython.double=0., truncate: cython.double=4., backend: str='numpy',
  __pyx_t_2 = PyFloat_FromDouble(((double)0.)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = PyFloat_FromDouble(((double)4.)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 178, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
+179:                     num_threads: cython.uint=1) -> np.ndarray:
  __pyx_t_4 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 179, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
 180:     r"""Multidimensional Gaussian filter. The multidimensional filter is implemented as
 181:     a sequence of 1-D FFT convolutions.
 182: 
 183:     Parameters
 184:     ----------
 185:     inp : np.ndarray
 186:         The input array.
 187:     sigma : float or list of floats
 188:         Standard deviation for Gaussian kernel. The standard deviations of the Gaussian
 189:         filter are given for each axis as a sequence, or as a single number, in which case
 190:         it is equal for all axes.
 191:     order : int or list of ints, optional
 192:         The order of the filter along each axis is given as a sequence of integers, or as
 193:         a single number. An order of 0 corresponds to convolution with a Gaussian kernel.
 194:         A positive order corresponds to convolution with that derivative of a Gaussian.
 195:     mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional
 196:         The mode parameter determines how the input array is extended when the filter
 197:         overlaps a border. Default value is 'reflect'. The valid values and their behavior
 198:         are as follows:
 199: 
 200:         * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all
 201:           values beyond the edge with the same constant value, defined by the `cval`
 202:           parameter.
 203:         * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating
 204:           the last pixel.
 205:         * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting
 206:           about the center of the last pixel. This mode is also sometimes referred to as
 207:           whole-sample symmetric.
 208:         * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting
 209:           about the edge of the last pixel. This mode is also sometimes referred to as
 210:           half-sample symmetric.
 211:         * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around
 212:           to the opposite edge.
 213:     cval : float, optional
 214:         Value to fill past edges of input if mode is ‘constant’. Default is 0.0.
 215:     truncate : float, optional
 216:         Truncate the filter at this many standard deviations. Default is 4.0.
 217:     backend : {'fftw', 'numpy'}, optional
 218:         Choose backend library for the FFT implementation.
 219:     num_threads : int, optional
 220:         Number of threads.
 221:     
 222:     Returns
 223:     -------
 224:     out : np.ndarray
 225:         Returned array of the same shape as `inp`.
 226:     """
+227:     cdef int ndim = inp.ndim
  __pyx_t_1 = __pyx_v_inp->nd;
  __pyx_v_ndim = __pyx_t_1;
+228:     cdef np.ndarray sigmas = normalize_sequence(sigma, ndim, np.NPY_FLOAT64)
  __pyx_t_2 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_normalize_sequence(__pyx_v_sigma, __pyx_v_ndim, NPY_FLOAT64)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 228, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_sigmas = ((PyArrayObject *)__pyx_t_2);
  __pyx_t_2 = 0;
+229:     cdef np.ndarray orders = normalize_sequence(order, ndim, np.NPY_UINT32)
  __pyx_t_2 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_normalize_sequence(__pyx_v_order, __pyx_v_ndim, NPY_UINT32)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 229, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_orders = ((PyArrayObject *)__pyx_t_2);
  __pyx_t_2 = 0;
 230:     cdef int i
+231:     cdef np.npy_intp *dims = inp.shape
  __pyx_t_3 = __pyx_v_inp->dimensions;
  __pyx_v_dims = __pyx_t_3;
+232:     cdef np.npy_intp *kdims = <np.npy_intp *>malloc(ndim * sizeof(np.npy_intp))
  __pyx_v_kdims = ((npy_intp *)malloc((__pyx_v_ndim * (sizeof(npy_intp)))));
+233:     for i in range(ndim):
  __pyx_t_1 = __pyx_v_ndim;
  __pyx_t_4 = __pyx_t_1;
  for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
    __pyx_v_i = __pyx_t_5;
+234:         kdims[i] = <np.npy_intp>(2 * sigmas[i] * truncate) + 1
    __pyx_t_2 = __Pyx_GetItemInt(((PyObject *)__pyx_v_sigmas), __pyx_v_i, int, 1, __Pyx_PyInt_From_int, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 234, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_6 = PyNumber_Multiply(__pyx_int_2, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 234, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = PyFloat_FromDouble(__pyx_v_truncate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 234, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_7 = PyNumber_Multiply(__pyx_t_6, __pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 234, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_8 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_7); if (unlikely((__pyx_t_8 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 234, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    (__pyx_v_kdims[__pyx_v_i]) = (((npy_intp)__pyx_t_8) + 1);
  }
+235:     cdef bint if_fft = fft_faster(inp.shape, kdims, ndim)
  __pyx_v_if_fft = __pyx_f_6pyrost_3bin_10simulation_fft_faster(__pyx_v_inp->dimensions, __pyx_v_kdims, __pyx_v_ndim);
+236:     free(kdims)
  free(__pyx_v_kdims);
+237:     if if_fft:
  __pyx_t_9 = (__pyx_v_if_fft != 0);
  if (__pyx_t_9) {
/* … */
  }
+238:         return gf_fft(inp, sigmas, orders, mode, cval, truncate, backend, num_threads)
    __Pyx_XDECREF(((PyObject *)__pyx_r));
    __pyx_t_7 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_gf_fft(__pyx_v_inp, __pyx_v_sigmas, __pyx_v_orders, __pyx_v_mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_backend, __pyx_v_num_threads)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 238, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_r = ((PyArrayObject *)__pyx_t_7);
    __pyx_t_7 = 0;
    goto __pyx_L0;
 239:     else:
+240:         return gf(input=inp, sigma=sigma, order=order, mode=mode, cval=cval, truncate=truncate)
  /*else*/ {
    __Pyx_XDECREF(((PyObject *)__pyx_r));
    __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_gf); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 240, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_2 = __Pyx_PyDict_NewPresized(6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 240, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_input, ((PyObject *)__pyx_v_inp)) < 0) __PYX_ERR(0, 240, __pyx_L1_error)
    if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_sigma, __pyx_v_sigma) < 0) __PYX_ERR(0, 240, __pyx_L1_error)
    if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_order, __pyx_v_order) < 0) __PYX_ERR(0, 240, __pyx_L1_error)
    if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_mode, __pyx_v_mode) < 0) __PYX_ERR(0, 240, __pyx_L1_error)
    __pyx_t_6 = PyFloat_FromDouble(__pyx_v_cval); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 240, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_cval, __pyx_t_6) < 0) __PYX_ERR(0, 240, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_6 = PyFloat_FromDouble(__pyx_v_truncate); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 240, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_truncate, __pyx_t_6) < 0) __PYX_ERR(0, 240, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 240, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 240, __pyx_L1_error)
    __pyx_r = ((PyArrayObject *)__pyx_t_6);
    __pyx_t_6 = 0;
    goto __pyx_L0;
  }
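A minimal usage sketch of the wrapper above (not part of the generated listing; it assumes the compiled pyrost.bin.simulation extension is importable and uses illustrative array sizes and sigma values):

    import numpy as np
    from pyrost.bin.simulation import gaussian_filter

    data = np.random.rand(256, 256)
    # Smooth along both axes with sigma = 2; order, cval, truncate, backend and
    # num_threads keep the defaults shown in the signature (0, 0., 4., 'numpy', 1).
    smoothed = gaussian_filter(data, sigma=2., mode='reflect')
    # When the FFT route is not estimated to be faster (fft_faster above), the
    # call falls back to scipy.ndimage.gaussian_filter.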
 241: 
+242: cdef np.ndarray ggm_fft(np.ndarray inp, np.ndarray sigma, str mode, double cval,
static PyArrayObject *__pyx_f_6pyrost_3bin_10simulation_ggm_fft(PyArrayObject *__pyx_v_inp, PyArrayObject *__pyx_v_sigma, PyObject *__pyx_v_mode, double __pyx_v_cval, double __pyx_v_truncate, PyObject *__pyx_v_backend, unsigned int __pyx_v_num_threads) {
  int __pyx_v_ndim;
  npy_intp *__pyx_v_dims;
  PyArrayObject *__pyx_v_out = 0;
  double *__pyx_v__out;
  double *__pyx_v__inp;
  unsigned long *__pyx_v__dims;
  double *__pyx_v__sig;
  int __pyx_v__mode;
  int __pyx_v_fail;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("ggm_fft", 0);
  __Pyx_INCREF((PyObject *)__pyx_v_inp);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("pyrost.bin.simulation.ggm_fft", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_out);
  __Pyx_XDECREF((PyObject *)__pyx_v_inp);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
 243:                         double truncate, str backend, unsigned int num_threads):
+244:     inp = np.PyArray_GETCONTIGUOUS(inp)
  __pyx_t_1 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_inp)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 244, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF_SET(__pyx_v_inp, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+245:     inp = np.PyArray_Cast(inp, np.NPY_FLOAT64)
  __pyx_t_1 = PyArray_Cast(__pyx_v_inp, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 245, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 245, __pyx_L1_error)
  __Pyx_DECREF_SET(__pyx_v_inp, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
 246: 
+247:     cdef int ndim = inp.ndim
  __pyx_t_2 = __pyx_v_inp->nd;
  __pyx_v_ndim = __pyx_t_2;
+248:     cdef np.npy_intp *dims = inp.shape
  __pyx_t_3 = __pyx_v_inp->dimensions;
  __pyx_v_dims = __pyx_t_3;
+249:     cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, np.NPY_FLOAT64)
  __pyx_t_1 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 249, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_4 = __pyx_t_1;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_v_out = ((PyArrayObject *)__pyx_t_4);
  __pyx_t_4 = 0;
+250:     cdef double *_out = <double *>np.PyArray_DATA(out)
  __pyx_v__out = ((double *)PyArray_DATA(__pyx_v_out));
+251:     cdef double *_inp = <double *>np.PyArray_DATA(inp)
  __pyx_v__inp = ((double *)PyArray_DATA(__pyx_v_inp));
+252:     cdef unsigned long *_dims = <unsigned long *>dims
  __pyx_v__dims = ((unsigned long *)__pyx_v_dims);
+253:     cdef double *_sig = <double *>np.PyArray_DATA(sigma)
  __pyx_v__sig = ((double *)PyArray_DATA(__pyx_v_sigma));
+254:     cdef int _mode = extend_mode_to_code(mode)
  __pyx_t_2 = __pyx_f_6pyrost_3bin_10simulation_extend_mode_to_code(__pyx_v_mode); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 254, __pyx_L1_error)
  __pyx_v__mode = __pyx_t_2;
+255:     with nogil:
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {
/* … */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }
+256:         if backend == 'fftw':
        __pyx_t_5 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_fftw, Py_EQ)); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 256, __pyx_L4_error)
        __pyx_t_6 = (__pyx_t_5 != 0);
        if (__pyx_t_6) {
/* … */
          goto __pyx_L6;
        }
+257:             gauss_grad_fftw(_out, _inp, ndim, _dims, _sig, _mode, cval, truncate, num_threads)
          gauss_grad_fftw(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v__sig, __pyx_v__mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_num_threads);
+258:         elif backend == 'numpy':
        __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_numpy, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 258, __pyx_L4_error)
        __pyx_t_5 = (__pyx_t_6 != 0);
        if (__pyx_t_5) {
/* … */
          goto __pyx_L6;
        }
+259:             fail = gauss_grad_np(_out, _inp, ndim, _dims, _sig, _mode, cval, truncate, num_threads)
          __pyx_v_fail = gauss_grad_np(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v__sig, __pyx_v__mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_num_threads);
+260:             if fail:
          __pyx_t_5 = (__pyx_v_fail != 0);
          if (__pyx_t_5) {
/* … */
          }
+261:                 raise RuntimeError('NumPy FFT exited with error')
            {
                #ifdef WITH_THREAD
                PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
                #endif
                /*try:*/ {
                  __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 261, __pyx_L9_error)
                  __Pyx_GOTREF(__pyx_t_4);
                  __Pyx_Raise(__pyx_t_4, 0, 0, 0);
                  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
                  __PYX_ERR(0, 261, __pyx_L9_error)
                }
                /*finally:*/ {
                  __pyx_L9_error: {
                    #ifdef WITH_THREAD
                    __Pyx_PyGILState_Release(__pyx_gilstate_save);
                    #endif
                    goto __pyx_L4_error;
                  }
                }
            }
 262:         else:
+263:             raise ValueError('{:s} is invalid backend'.format(backend))
        /*else*/ {
          {
              #ifdef WITH_THREAD
              PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
              #endif
              /*try:*/ {
                __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_s_is_invalid_backend, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 263, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_1);
                __pyx_t_7 = NULL;
                if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
                  __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_1);
                  if (likely(__pyx_t_7)) {
                    PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
                    __Pyx_INCREF(__pyx_t_7);
                    __Pyx_INCREF(function);
                    __Pyx_DECREF_SET(__pyx_t_1, function);
                  }
                }
                __pyx_t_4 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_7, __pyx_v_backend) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_backend);
                __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
                if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 263, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_4);
                __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 263, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_1);
                __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
                __Pyx_Raise(__pyx_t_1, 0, 0, 0);
                __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                __PYX_ERR(0, 263, __pyx_L12_error)
              }
              /*finally:*/ {
                __pyx_L12_error: {
                  #ifdef WITH_THREAD
                  __Pyx_PyGILState_Release(__pyx_gilstate_save);
                  #endif
                  goto __pyx_L4_error;
                }
              }
          }
        }
        __pyx_L6:;
      }
+264:     return out
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_out));
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;
 265: 
+266: def gaussian_gradient_magnitude(inp: np.ndarray, sigma: object, mode: str='reflect',
/* Python wrapper */
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_7gaussian_gradient_magnitude(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6pyrost_3bin_10simulation_6gaussian_gradient_magnitude[] = "gaussian_gradient_magnitude(ndarray inp: np.ndarray, sigma: object, unicode mode: str = u'reflect', double cval: cython.double = 0., double truncate: cython.double = 4., unicode backend: str = u'numpy', unsigned int num_threads: cython.uint = 1) -> np.ndarray\nMultidimensional gradient magnitude using Gaussian derivatives. The multidimensional\n    filter is implemented as a sequence of 1-D FFT convolutions.\n\n    Parameters\n    ----------\n    inp : np.ndarray\n        The input array.\n    sigma : float or list of floats\n        The standard deviations of the Gaussian filter are given for each axis as a sequence,\n        or as a single number, in which case it is equal for all axes.\n    mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional\n        The mode parameter determines how the input array is extended when the filter\n        overlaps a border. Default value is 'reflect'. The valid values and their behavior\n        is as follows:\n\n        * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all\n          values beyond the edge with the same constant value, defined by the `cval`\n          parameter.\n        * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating\n          the last pixel.\n        * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting\n          about the center of the last pixel. This mode is also sometimes referred to as\n          whole-sample symmetric.\n        * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting\n          about the edge of the last pixel. This mode is also sometimes referred to as\n          half-sample symmetric.\n        * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around\n          to the opposite edge.\n    cval : float, optional\n        Value to fill past edges of input if mode is \342\200\230constant\342\200\231. Default is 0.0.\n    truncate : float"", optional\n        Truncate the filter at this many standard deviations. Default is 4.0.\n    backend : {'fftw', 'numpy'}, optional\n        Choose backend library for the FFT implementation.\n    num_threads : int, optional\n        Number of threads.\n    ";
static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_7gaussian_gradient_magnitude = {"gaussian_gradient_magnitude", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_7gaussian_gradient_magnitude, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_6gaussian_gradient_magnitude};
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_7gaussian_gradient_magnitude(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_inp = 0;
  PyObject *__pyx_v_sigma = 0;
  PyObject *__pyx_v_mode = 0;
  double __pyx_v_cval;
  double __pyx_v_truncate;
  PyObject *__pyx_v_backend = 0;
  unsigned int __pyx_v_num_threads;
  PyArrayObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gaussian_gradient_magnitude (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_inp,&__pyx_n_s_sigma,&__pyx_n_s_mode,&__pyx_n_s_cval,&__pyx_n_s_truncate,&__pyx_n_s_backend,&__pyx_n_s_num_threads,0};
    PyObject* values[7] = {0,0,0,0,0,0,0};
    values[2] = ((PyObject*)((PyObject*)__pyx_n_u_reflect));
    values[5] = ((PyObject*)((PyObject*)__pyx_n_u_numpy));
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        CYTHON_FALLTHROUGH;
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inp)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("gaussian_gradient_magnitude", 0, 2, 7, 1); __PYX_ERR(0, 266, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
          if (value) { values[2] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cval);
          if (value) { values[3] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_truncate);
          if (value) { values[4] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  5:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_backend);
          if (value) { values[5] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  6:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads);
          if (value) { values[6] = value; kw_args--; }
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gaussian_gradient_magnitude") < 0)) __PYX_ERR(0, 266, __pyx_L3_error)
      }
    } else {
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        CYTHON_FALLTHROUGH;
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_inp = ((PyArrayObject *)values[0]);
    __pyx_v_sigma = values[1];
    __pyx_v_mode = ((PyObject*)values[2]);
    if (values[3]) {
      __pyx_v_cval = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_cval == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 267, __pyx_L3_error)
    } else {
      __pyx_v_cval = ((double)((double)0.));
    }
    if (values[4]) {
      __pyx_v_truncate = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_truncate == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 267, __pyx_L3_error)
    } else {
      __pyx_v_truncate = ((double)((double)4.));
    }
    __pyx_v_backend = ((PyObject*)values[5]);
    if (values[6]) {
      __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[6]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 268, __pyx_L3_error)
    } else {
      __pyx_v_num_threads = ((unsigned int)((unsigned int)1));
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("gaussian_gradient_magnitude", 0, 2, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 266, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("pyrost.bin.simulation.gaussian_gradient_magnitude", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_inp), __pyx_ptype_5numpy_ndarray, 1, "inp", 0))) __PYX_ERR(0, 266, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mode), (&PyUnicode_Type), 1, "mode", 1))) __PYX_ERR(0, 266, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_backend), (&PyUnicode_Type), 1, "backend", 1))) __PYX_ERR(0, 268, __pyx_L1_error)
  __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_6gaussian_gradient_magnitude(__pyx_self, __pyx_v_inp, __pyx_v_sigma, __pyx_v_mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_backend, __pyx_v_num_threads);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_6gaussian_gradient_magnitude(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_inp, PyObject *__pyx_v_sigma, PyObject *__pyx_v_mode, double __pyx_v_cval, double __pyx_v_truncate, PyObject *__pyx_v_backend, unsigned int __pyx_v_num_threads) {
  int __pyx_v_ndim;
  PyArrayObject *__pyx_v_sigmas = 0;
  int __pyx_v_i;
  CYTHON_UNUSED npy_intp *__pyx_v_dims;
  npy_intp *__pyx_v_kdims;
  int __pyx_v_if_fft;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gaussian_gradient_magnitude", 0);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_AddTraceback("pyrost.bin.simulation.gaussian_gradient_magnitude", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_sigmas);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__17 = PyTuple_Pack(13, __pyx_n_s_inp, __pyx_n_s_sigma, __pyx_n_s_mode, __pyx_n_s_cval, __pyx_n_s_truncate, __pyx_n_s_backend, __pyx_n_s_num_threads, __pyx_n_s_ndim, __pyx_n_s_sigmas, __pyx_n_s_i, __pyx_n_s_dims, __pyx_n_s_kdims, __pyx_n_s_if_fft); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(0, 266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__17);
  __Pyx_GIVEREF(__pyx_tuple__17);
/* … */
  __pyx_t_2 = PyTuple_New(5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_INCREF(((PyObject*)__pyx_n_u_reflect));
  __Pyx_GIVEREF(((PyObject*)__pyx_n_u_reflect));
  PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject*)__pyx_n_u_reflect));
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_4);
  __Pyx_INCREF(((PyObject*)__pyx_n_u_numpy));
  __Pyx_GIVEREF(((PyObject*)__pyx_n_u_numpy));
  PyTuple_SET_ITEM(__pyx_t_2, 3, ((PyObject*)__pyx_n_u_numpy));
  __Pyx_GIVEREF(__pyx_t_5);
  PyTuple_SET_ITEM(__pyx_t_2, 4, __pyx_t_5);
  __pyx_t_1 = 0;
  __pyx_t_4 = 0;
  __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PyDict_NewPresized(8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_inp, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 266, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_sigma, __pyx_n_u_object) < 0) __PYX_ERR(0, 266, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_mode, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 266, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_cval, __pyx_n_u_double) < 0) __PYX_ERR(0, 266, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_truncate, __pyx_n_u_double) < 0) __PYX_ERR(0, 266, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_backend, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 266, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 266, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 266, __pyx_L1_error)
  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_7gaussian_gradient_magnitude, 0, __pyx_n_s_gaussian_gradient_magnitude, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__18)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 266, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_4, __pyx_t_2);
  __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_4, __pyx_t_5);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_gaussian_gradient_magnitude, __pyx_t_4) < 0) __PYX_ERR(0, 266, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(7, 0, 13, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_gaussian_gradient_magnitude, 266, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) __PYX_ERR(0, 266, __pyx_L1_error)
+267:                                 cval: cython.double=0., truncate: cython.double=4.,
  __pyx_t_1 = PyFloat_FromDouble(((double)0.)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 267, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_4 = PyFloat_FromDouble(((double)4.)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 267, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
+268:                                 backend: str='numpy', num_threads: cython.uint=1) -> np.ndarray:
  __pyx_t_5 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 268, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
 269:     r"""Multidimensional gradient magnitude using Gaussian derivatives. The multidimensional
 270:     filter is implemented as a sequence of 1-D FFT convolutions.
 271: 
 272:     Parameters
 273:     ----------
 274:     inp : np.ndarray
 275:         The input array.
 276:     sigma : float or list of floats
 277:         The standard deviations of the Gaussian filter are given for each axis as a sequence,
 278:         or as a single number, in which case it is equal for all axes.
 279:     mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional
 280:         The mode parameter determines how the input array is extended when the filter
 281:         overlaps a border. Default value is 'reflect'. The valid values and their behavior
 282:         are as follows:
 283: 
 284:         * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all
 285:           values beyond the edge with the same constant value, defined by the `cval`
 286:           parameter.
 287:         * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating
 288:           the last pixel.
 289:         * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting
 290:           about the center of the last pixel. This mode is also sometimes referred to as
 291:           whole-sample symmetric.
 292:         * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting
 293:           about the edge of the last pixel. This mode is also sometimes referred to as
 294:           half-sample symmetric.
 295:         * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around
 296:           to the opposite edge.
 297:     cval : float, optional
 298:         Value to fill past edges of input if mode is ‘constant’. Default is 0.0.
 299:     truncate : float, optional
 300:         Truncate the filter at this many standard deviations. Default is 4.0.
 301:     backend : {'fftw', 'numpy'}, optional
 302:         Choose backend library for the FFT implementation.
 303:     num_threads : int, optional
 304:         Number of threads.
 305:     """
+306:     cdef int ndim = inp.ndim
  __pyx_t_1 = __pyx_v_inp->nd;
  __pyx_v_ndim = __pyx_t_1;
+307:     cdef np.ndarray sigmas = normalize_sequence(sigma, ndim, np.NPY_FLOAT64)
  __pyx_t_2 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_normalize_sequence(__pyx_v_sigma, __pyx_v_ndim, NPY_FLOAT64)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 307, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_v_sigmas = ((PyArrayObject *)__pyx_t_2);
  __pyx_t_2 = 0;
 308:     cdef int i
+309:     cdef np.npy_intp *dims = inp.shape
  __pyx_t_3 = __pyx_v_inp->dimensions;
  __pyx_v_dims = __pyx_t_3;
+310:     cdef np.npy_intp *kdims = <np.npy_intp *>malloc(ndim * sizeof(np.npy_intp))
  __pyx_v_kdims = ((npy_intp *)malloc((__pyx_v_ndim * (sizeof(npy_intp)))));
+311:     for i in range(ndim):
  __pyx_t_1 = __pyx_v_ndim;
  __pyx_t_4 = __pyx_t_1;
  for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
    __pyx_v_i = __pyx_t_5;
+312:         kdims[i] = <np.npy_intp>(2 * sigmas[i] * truncate) + 1
    __pyx_t_2 = __Pyx_GetItemInt(((PyObject *)__pyx_v_sigmas), __pyx_v_i, int, 1, __Pyx_PyInt_From_int, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 312, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_6 = PyNumber_Multiply(__pyx_int_2, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 312, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = PyFloat_FromDouble(__pyx_v_truncate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 312, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_7 = PyNumber_Multiply(__pyx_t_6, __pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 312, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_8 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_7); if (unlikely((__pyx_t_8 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 312, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    (__pyx_v_kdims[__pyx_v_i]) = (((npy_intp)__pyx_t_8) + 1);
  }
+313:     cdef bint if_fft = fft_faster(inp.shape, kdims, ndim)
  __pyx_v_if_fft = __pyx_f_6pyrost_3bin_10simulation_fft_faster(__pyx_v_inp->dimensions, __pyx_v_kdims, __pyx_v_ndim);
+314:     free(kdims)
  free(__pyx_v_kdims);
+315:     if if_fft:
  __pyx_t_9 = (__pyx_v_if_fft != 0);
  if (__pyx_t_9) {
/* … */
  }
+316:         return ggm_fft(inp, sigmas, mode, cval, truncate, backend, num_threads)
    __Pyx_XDECREF(((PyObject *)__pyx_r));
    __pyx_t_7 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_ggm_fft(__pyx_v_inp, __pyx_v_sigmas, __pyx_v_mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_backend, __pyx_v_num_threads)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 316, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_r = ((PyArrayObject *)__pyx_t_7);
    __pyx_t_7 = 0;
    goto __pyx_L0;
 317:     else:
+318:         return ggm(input=inp, sigma=sigma, mode=mode, cval=cval, truncate=truncate)
  /*else*/ {
    __Pyx_XDECREF(((PyObject *)__pyx_r));
    __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_ggm); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 318, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __pyx_t_2 = __Pyx_PyDict_NewPresized(5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 318, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_input, ((PyObject *)__pyx_v_inp)) < 0) __PYX_ERR(0, 318, __pyx_L1_error)
    if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_sigma, __pyx_v_sigma) < 0) __PYX_ERR(0, 318, __pyx_L1_error)
    if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_mode, __pyx_v_mode) < 0) __PYX_ERR(0, 318, __pyx_L1_error)
    __pyx_t_6 = PyFloat_FromDouble(__pyx_v_cval); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 318, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_cval, __pyx_t_6) < 0) __PYX_ERR(0, 318, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_6 = PyFloat_FromDouble(__pyx_v_truncate); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 318, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_truncate, __pyx_t_6) < 0) __PYX_ERR(0, 318, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 318, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 318, __pyx_L1_error)
    __pyx_r = ((PyArrayObject *)__pyx_t_6);
    __pyx_t_6 = 0;
    goto __pyx_L0;
  }
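A minimal usage sketch of gaussian_gradient_magnitude (again assuming the compiled pyrost.bin.simulation extension is importable; the sigma and thread counts are illustrative):

    import numpy as np
    from pyrost.bin.simulation import gaussian_gradient_magnitude

    frame = np.random.rand(128, 128)
    # Per-axis standard deviations with the multi-threaded FFTW backend; small
    # kernels fall back to scipy.ndimage.gaussian_gradient_magnitude (ggm above).
    grad = gaussian_gradient_magnitude(frame, sigma=[1.5, 2.5], backend='fftw',
                                       num_threads=4)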
 319: 
+320: def rsc_wp(wft: np.ndarray, dx0: cython.double, dx: cython.double, z: cython.double,
/* Python wrapper */
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_9rsc_wp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6pyrost_3bin_10simulation_8rsc_wp[] = "rsc_wp(ndarray wft: np.ndarray, double dx0: cython.double, double dx: cython.double, double z: cython.double, double wl: cython.double, int axis: cython.int = -1, unicode backend: str = u'numpy', unsigned int num_threads: cython.uint = 1) -> np.ndarray\nWavefront propagator based on Rayleigh-Sommerfeld convolution\n    method [RSC]_. Propagates a wavefront `wft` by `z` distance\n    downstream. You can choose between 'fftw' and 'numpy' backends for FFT\n    calculations. 'fftw' backend supports multiprocessing.\n\n    Parameters\n    ----------\n    wft : numpy.ndarray\n        Initial wavefront.\n    dx0 : float\n        Sampling interval at the plane upstream [um].\n    dx : float\n        Sampling interval at the plane downstream [um].\n    z : float\n        Propagation distance [um].\n    wl : float\n        Incoming beam's wavelength [um].\n    axis : int, optional\n        Axis of `wft` array along which the calculation is\n        performed.\n    backend : {'fftw', 'numpy'}, optional\n        Choose backend library for the FFT implementation.\n    num_threads: int, optional\n        Number of threads used in calculation. Only 'fftw' backend\n        supports it.\n\n    Returns\n    -------\n    out : numpy.ndarray\n        Propagated wavefront.\n\n    Raises\n    ------\n    RuntimeError\n        If 'numpy' backend exits with eror during the calculation.\n    ValueError\n        If `backend` option is invalid.\n\n    Notes\n    -----\n    The Rayleigh\342\200\223Sommerfeld diffraction integral transform is defined as:\n\n    .. math::\n        u^{\\prime}(x^{\\prime}) = \\frac{z}{j \\sqrt{\\lambda}} \\int_{-\\infty}^{+\\infty}\n        u(x) \\mathrm{exp} \\left[-j k r(x, x^{\\prime}) \\right] dx\n    \n    with\n\n    .. math::\n        r(x, x^{\\prime}) = \\left[ (x - x^{\\prime})^2 + z^2 \\right]^{1 / 2}\n\n    References\n    ----------\n    .. [RSC] V. Nascov and P. C. Logof\304\203tu, \"Fast computation algorithm\n             for the Rayleigh-Sommerfel""d diffraction formula using\n             a type of scaled convolution,\" Appl. Opt. 48, 4310-4319\n             (2009).\n    ";
static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_9rsc_wp = {"rsc_wp", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_9rsc_wp, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_8rsc_wp};
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_9rsc_wp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_wft = 0;
  double __pyx_v_dx0;
  double __pyx_v_dx;
  double __pyx_v_z;
  double __pyx_v_wl;
  int __pyx_v_axis;
  PyObject *__pyx_v_backend = 0;
  unsigned int __pyx_v_num_threads;
  PyArrayObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("rsc_wp (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wft,&__pyx_n_s_dx0,&__pyx_n_s_dx,&__pyx_n_s_z,&__pyx_n_s_wl,&__pyx_n_s_axis,&__pyx_n_s_backend,&__pyx_n_s_num_threads,0};
    PyObject* values[8] = {0,0,0,0,0,0,0,0};
    values[6] = ((PyObject*)((PyObject*)__pyx_n_u_numpy));
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
        CYTHON_FALLTHROUGH;
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        CYTHON_FALLTHROUGH;
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wft)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dx0)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("rsc_wp", 0, 5, 8, 1); __PYX_ERR(0, 320, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dx)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("rsc_wp", 0, 5, 8, 2); __PYX_ERR(0, 320, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("rsc_wp", 0, 5, 8, 3); __PYX_ERR(0, 320, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wl)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("rsc_wp", 0, 5, 8, 4); __PYX_ERR(0, 320, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  5:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis);
          if (value) { values[5] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  6:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_backend);
          if (value) { values[6] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  7:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads);
          if (value) { values[7] = value; kw_args--; }
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "rsc_wp") < 0)) __PYX_ERR(0, 320, __pyx_L3_error)
      }
    } else {
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case  8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
        CYTHON_FALLTHROUGH;
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        CYTHON_FALLTHROUGH;
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_wft = ((PyArrayObject *)values[0]);
    __pyx_v_dx0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_dx0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 320, __pyx_L3_error)
    __pyx_v_dx = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_dx == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 320, __pyx_L3_error)
    __pyx_v_z = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 320, __pyx_L3_error)
    __pyx_v_wl = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_wl == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 321, __pyx_L3_error)
    if (values[5]) {
      __pyx_v_axis = __Pyx_PyInt_As_int(values[5]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 321, __pyx_L3_error)
    } else {
      __pyx_v_axis = ((int)((int)-1));
    }
    __pyx_v_backend = ((PyObject*)values[6]);
    if (values[7]) {
      __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[7]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 322, __pyx_L3_error)
    } else {
      __pyx_v_num_threads = ((unsigned int)((unsigned int)1));
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("rsc_wp", 0, 5, 8, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 320, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("pyrost.bin.simulation.rsc_wp", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_wft), __pyx_ptype_5numpy_ndarray, 1, "wft", 0))) __PYX_ERR(0, 320, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_backend), (&PyUnicode_Type), 1, "backend", 1))) __PYX_ERR(0, 321, __pyx_L1_error)
  __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_8rsc_wp(__pyx_self, __pyx_v_wft, __pyx_v_dx0, __pyx_v_dx, __pyx_v_z, __pyx_v_wl, __pyx_v_axis, __pyx_v_backend, __pyx_v_num_threads);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_8rsc_wp(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_wft, double __pyx_v_dx0, double __pyx_v_dx, double __pyx_v_z, double __pyx_v_wl, int __pyx_v_axis, PyObject *__pyx_v_backend, unsigned int __pyx_v_num_threads) {
  npy_intp __pyx_v_isize;
  int __pyx_v_ndim;
  npy_intp __pyx_v_istride;
  npy_intp __pyx_v_npts;
  npy_intp *__pyx_v_dims;
  PyArrayObject *__pyx_v_out = 0;
  __pyx_t_double_complex *__pyx_v__out;
  __pyx_t_double_complex *__pyx_v__inp;
  int __pyx_v_fail;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("rsc_wp", 0);
  __Pyx_INCREF((PyObject *)__pyx_v_wft);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("pyrost.bin.simulation.rsc_wp", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_out);
  __Pyx_XDECREF((PyObject *)__pyx_v_wft);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__19 = PyTuple_Pack(17, __pyx_n_s_wft, __pyx_n_s_dx0, __pyx_n_s_dx, __pyx_n_s_z, __pyx_n_s_wl, __pyx_n_s_axis, __pyx_n_s_backend, __pyx_n_s_num_threads, __pyx_n_s_isize, __pyx_n_s_ndim, __pyx_n_s_istride, __pyx_n_s_npts, __pyx_n_s_dims, __pyx_n_s_out, __pyx_n_s_out_2, __pyx_n_s_inp_2, __pyx_n_s_fail); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 320, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__19);
  __Pyx_GIVEREF(__pyx_tuple__19);
/* … */
  __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 320, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4);
  __Pyx_INCREF(((PyObject*)__pyx_n_u_numpy));
  __Pyx_GIVEREF(((PyObject*)__pyx_n_u_numpy));
  PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject*)__pyx_n_u_numpy));
  __Pyx_GIVEREF(__pyx_t_5);
  PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
  __pyx_t_4 = 0;
  __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PyDict_NewPresized(9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 320, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_wft, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 320, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dx0, __pyx_n_u_double) < 0) __PYX_ERR(0, 320, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dx, __pyx_n_u_double) < 0) __PYX_ERR(0, 320, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_z, __pyx_n_u_double) < 0) __PYX_ERR(0, 320, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_wl, __pyx_n_u_double) < 0) __PYX_ERR(0, 320, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_axis, __pyx_n_u_int) < 0) __PYX_ERR(0, 320, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_backend, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 320, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 320, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 320, __pyx_L1_error)
  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_9rsc_wp, 0, __pyx_n_s_rsc_wp, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__20)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 320, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_4, __pyx_t_2);
  __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_4, __pyx_t_5);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_rsc_wp, __pyx_t_4) < 0) __PYX_ERR(0, 320, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(8, 0, 17, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_rsc_wp, 320, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 320, __pyx_L1_error)
+321:            wl: cython.double, axis: cython.int=-1, backend: str='numpy',
  __pyx_t_4 = __Pyx_PyInt_From_int(((int)-1)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 321, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
+322:            num_threads: cython.uint=1) -> np.ndarray:
  __pyx_t_5 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 322, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
 323:     r"""Wavefront propagator based on Rayleigh-Sommerfeld convolution
 324:     method [RSC]_. Propagates a wavefront `wft` by `z` distance
 325:     downstream. You can choose between 'fftw' and 'numpy' backends for FFT
 326:     calculations. The 'fftw' backend supports multithreading.
 327: 
 328:     Parameters
 329:     ----------
 330:     wft : numpy.ndarray
 331:         Initial wavefront.
 332:     dx0 : float
 333:         Sampling interval at the plane upstream [um].
 334:     dx : float
 335:         Sampling interval at the plane downstream [um].
 336:     z : float
 337:         Propagation distance [um].
 338:     wl : float
 339:         Incoming beam's wavelength [um].
 340:     axis : int, optional
 341:         Axis of `wft` array along which the calculation is
 342:         performed.
 343:     backend : {'fftw', 'numpy'}, optional
 344:         Choose backend library for the FFT implementation.
 345:     num_threads : int, optional
 346:         Number of threads used in calculation. Only 'fftw' backend
 347:         supports it.
 348: 
 349:     Returns
 350:     -------
 351:     out : numpy.ndarray
 352:         Propagated wavefront.
 353: 
 354:     Raises
 355:     ------
 356:     RuntimeError
 357:         If the 'numpy' backend exits with an error during the calculation.
 358:     ValueError
 359:         If `backend` option is invalid.
 360: 
 361:     Notes
 362:     -----
 363:     The Rayleigh–Sommerfeld diffraction integral transform is defined as:
 364: 
 365:     .. math::
 366:         u^{\prime}(x^{\prime}) = \frac{z}{j \sqrt{\lambda}} \int_{-\infty}^{+\infty}
 367:         u(x) \mathrm{exp} \left[-j k r(x, x^{\prime}) \right] dx
 368:     
 369:     with
 370: 
 371:     .. math::
 372:         r(x, x^{\prime}) = \left[ (x - x^{\prime})^2 + z^2 \right]^{1 / 2}
 373: 
 374:     References
 375:     ----------
 376:     .. [RSC] V. Nascov and P. C. Logofătu, "Fast computation algorithm
 377:              for the Rayleigh-Sommerfeld diffraction formula using
 378:              a type of scaled convolution," Appl. Opt. 48, 4310-4319
 379:              (2009).
 380:     """
+381:     wft = np.PyArray_GETCONTIGUOUS(wft)
  __pyx_t_1 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_wft)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 381, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF_SET(__pyx_v_wft, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+382:     wft = np.PyArray_Cast(wft, np.NPY_COMPLEX128)
  __pyx_t_1 = PyArray_Cast(__pyx_v_wft, NPY_COMPLEX128); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 382, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 382, __pyx_L1_error)
  __Pyx_DECREF_SET(__pyx_v_wft, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
 383: 
+384:     cdef np.npy_intp isize = np.PyArray_SIZE(wft)
  __pyx_v_isize = PyArray_SIZE(__pyx_v_wft);
+385:     cdef int ndim = wft.ndim
  __pyx_t_2 = __pyx_v_wft->nd;
  __pyx_v_ndim = __pyx_t_2;
+386:     axis = axis if axis >= 0 else ndim + axis
  if (((__pyx_v_axis >= 0) != 0)) {
    __pyx_t_2 = __pyx_v_axis;
  } else {
    __pyx_t_2 = (__pyx_v_ndim + __pyx_v_axis);
  }
  __pyx_v_axis = __pyx_t_2;
+387:     axis = axis if axis <= ndim - 1 else ndim - 1
  if (((__pyx_v_axis <= (__pyx_v_ndim - 1)) != 0)) {
    __pyx_t_3 = __pyx_v_axis;
  } else {
    __pyx_t_3 = (__pyx_v_ndim - 1);
  }
  __pyx_v_axis = __pyx_t_3;
+388:     cdef np.npy_intp istride = np.PyArray_STRIDE(wft, axis) / np.PyArray_ITEMSIZE(wft)
  __pyx_v_istride = (PyArray_STRIDE(__pyx_v_wft, __pyx_v_axis) / PyArray_ITEMSIZE(__pyx_v_wft));
+389:     cdef np.npy_intp npts = np.PyArray_DIM(wft, axis)
  __pyx_v_npts = PyArray_DIM(__pyx_v_wft, __pyx_v_axis);
+390:     cdef np.npy_intp *dims = wft.shape
  __pyx_t_4 = __pyx_v_wft->dimensions;
  __pyx_v_dims = __pyx_t_4;
+391:     cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, np.NPY_COMPLEX128)
  __pyx_t_1 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, NPY_COMPLEX128); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 391, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __pyx_t_1;
  __Pyx_INCREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_v_out = ((PyArrayObject *)__pyx_t_5);
  __pyx_t_5 = 0;
+392:     cdef complex *_out = <complex *>np.PyArray_DATA(out)
  __pyx_v__out = ((__pyx_t_double_complex *)PyArray_DATA(__pyx_v_out));
+393:     cdef complex *_inp = <complex *>np.PyArray_DATA(wft)
  __pyx_v__inp = ((__pyx_t_double_complex *)PyArray_DATA(__pyx_v_wft));
+394:     cdef int fail = 0
  __pyx_v_fail = 0;
+395:     with nogil:
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {
/* … */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }
+396:         if backend == 'fftw':
        __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_fftw, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 396, __pyx_L4_error)
        __pyx_t_7 = (__pyx_t_6 != 0);
        if (__pyx_t_7) {
/* … */
          goto __pyx_L6;
        }
+397:             rsc_fftw(_out, _inp, isize, npts, istride, dx0, dx, z, wl, num_threads)
          rsc_fftw(__pyx_v__out, __pyx_v__inp, __pyx_v_isize, __pyx_v_npts, __pyx_v_istride, __pyx_v_dx0, __pyx_v_dx, __pyx_v_z, __pyx_v_wl, __pyx_v_num_threads);
+398:         elif backend == 'numpy':
        __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_numpy, Py_EQ)); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 398, __pyx_L4_error)
        __pyx_t_6 = (__pyx_t_7 != 0);
        if (__pyx_t_6) {
/* … */
          goto __pyx_L6;
        }
+399:             fail = rsc_np(_out, _inp, isize, npts, istride, dx0, dx, z, wl, num_threads)
          __pyx_v_fail = rsc_np(__pyx_v__out, __pyx_v__inp, __pyx_v_isize, __pyx_v_npts, __pyx_v_istride, __pyx_v_dx0, __pyx_v_dx, __pyx_v_z, __pyx_v_wl, __pyx_v_num_threads);
+400:             if fail:
          __pyx_t_6 = (__pyx_v_fail != 0);
          if (__pyx_t_6) {
/* … */
          }
+401:                 raise RuntimeError('NumPy FFT exited with error')
            {
                #ifdef WITH_THREAD
                PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
                #endif
                /*try:*/ {
                  __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 401, __pyx_L9_error)
                  __Pyx_GOTREF(__pyx_t_5);
                  __Pyx_Raise(__pyx_t_5, 0, 0, 0);
                  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
                  __PYX_ERR(0, 401, __pyx_L9_error)
                }
                /*finally:*/ {
                  __pyx_L9_error: {
                    #ifdef WITH_THREAD
                    __Pyx_PyGILState_Release(__pyx_gilstate_save);
                    #endif
                    goto __pyx_L4_error;
                  }
                }
            }
 402:         else:
+403:             raise ValueError('{:s} is invalid backend'.format(backend))
        /*else*/ {
          {
              #ifdef WITH_THREAD
              PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
              #endif
              /*try:*/ {
                __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_s_is_invalid_backend, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 403, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_1);
                __pyx_t_8 = NULL;
                if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
                  __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_1);
                  if (likely(__pyx_t_8)) {
                    PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
                    __Pyx_INCREF(__pyx_t_8);
                    __Pyx_INCREF(function);
                    __Pyx_DECREF_SET(__pyx_t_1, function);
                  }
                }
                __pyx_t_5 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_8, __pyx_v_backend) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_backend);
                __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
                if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 403, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_5);
                __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 403, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_1);
                __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
                __Pyx_Raise(__pyx_t_1, 0, 0, 0);
                __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                __PYX_ERR(0, 403, __pyx_L12_error)
              }
              /*finally:*/ {
                __pyx_L12_error: {
                  #ifdef WITH_THREAD
                  __Pyx_PyGILState_Release(__pyx_gilstate_save);
                  #endif
                  goto __pyx_L4_error;
                }
              }
          }
        }
        __pyx_L6:;
      }
+404:     return out
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_out));
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;
 405: 
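As a quick usage sketch (the module name pyrost.bin.simulation comes from the traceback strings above; the sampling, distance and wavelength values below are purely illustrative), rsc_wp can be driven with a plain NumPy array:

    >>> import numpy as np
    >>> from pyrost.bin import simulation as sim
    >>> x = np.linspace(-50.0, 50.0, 2000)                # coordinates [um]
    >>> wft = np.exp(-x**2 / (2.0 * 10.0**2)) + 0j        # toy Gaussian wavefront
    >>> dx0 = float(x[1] - x[0])
    >>> out = sim.rsc_wp(wft, dx0=dx0, dx=dx0, z=1e4, wl=7.3e-5, backend='numpy')
    >>> out.shape, out.dtype
    ((2000,), dtype('complex128'))

The output shape and dtype follow directly from the PyArray_SimpleNew(ndim, dims, NPY_COMPLEX128) call at line 391.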
+406: def fraunhofer_wp(wft: np.ndarray, dx0: cython.double, dx: cython.double,
/* Python wrapper */
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_11fraunhofer_wp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6pyrost_3bin_10simulation_10fraunhofer_wp[] = "fraunhofer_wp(ndarray wft: np.ndarray, double dx0: cython.double, double dx: cython.double, double z: cython.double, double wl: cython.double, int axis: cython.int = -1, unicode backend: str = u'numpy', unsigned int num_threads: cython.uint = 1) -> np.ndarray\nFraunhofer diffraction propagator. Propagates a wavefront `wft` by\n    `z` distance downstream. You can choose between 'fftw' and 'numpy'\n    backends for FFT calculations. 'fftw' backend supports multiprocessing.\n\n    Parameters\n    ----------\n    wft : numpy.ndarray\n        Initial wavefront.\n    dx0 : float\n        Sampling interval at the plane upstream [um].\n    dx : float\n        Sampling interval at the plane downstream [um].\n    z : float\n        Propagation distance [um].\n    wl : float\n        Incoming beam's wavelength [um].\n    axis : int, optional\n        Axis of `wft` array along which the calculation is\n        performed.\n    backend : {'fftw', 'numpy'}, optional\n        Choose backend library for the FFT implementation.\n    num_threads: int, optional\n        Number of threads used in calculation. Only 'fftw' backend\n        supports it.\n\n    Returns\n    -------\n    out : numpy.ndarray\n        Propagated wavefront.\n\n    Raises\n    ------\n    RuntimeError\n        If 'numpy' backend exits with eror during the calculation.\n    ValueError\n        If `backend` option is invalid.\n\n    Notes\n    -----\n    The Fraunhofer integral transform is defined as:\n\n    .. math::\n        u^{\\prime}(x^{\\prime}) = \\frac{e^{-j k z}}{j \\sqrt{\\lambda z}}\n        e^{-\\frac{j k}{2 z} x^{\\prime 2}} \\int_{-\\infty}^{+\\infty} u(x)\n        e^{j\\frac{2 \\pi}{\\lambda z} x x^{\\prime}} dx\n    ";
static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_11fraunhofer_wp = {"fraunhofer_wp", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_11fraunhofer_wp, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_10fraunhofer_wp};
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_11fraunhofer_wp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_wft = 0;
  double __pyx_v_dx0;
  double __pyx_v_dx;
  double __pyx_v_z;
  double __pyx_v_wl;
  int __pyx_v_axis;
  PyObject *__pyx_v_backend = 0;
  unsigned int __pyx_v_num_threads;
  PyArrayObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("fraunhofer_wp (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wft,&__pyx_n_s_dx0,&__pyx_n_s_dx,&__pyx_n_s_z,&__pyx_n_s_wl,&__pyx_n_s_axis,&__pyx_n_s_backend,&__pyx_n_s_num_threads,0};
    PyObject* values[8] = {0,0,0,0,0,0,0,0};
    values[6] = ((PyObject*)((PyObject*)__pyx_n_u_numpy));
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
        CYTHON_FALLTHROUGH;
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        CYTHON_FALLTHROUGH;
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wft)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dx0)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("fraunhofer_wp", 0, 5, 8, 1); __PYX_ERR(0, 406, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dx)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("fraunhofer_wp", 0, 5, 8, 2); __PYX_ERR(0, 406, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("fraunhofer_wp", 0, 5, 8, 3); __PYX_ERR(0, 406, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wl)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("fraunhofer_wp", 0, 5, 8, 4); __PYX_ERR(0, 406, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  5:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis);
          if (value) { values[5] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  6:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_backend);
          if (value) { values[6] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  7:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads);
          if (value) { values[7] = value; kw_args--; }
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fraunhofer_wp") < 0)) __PYX_ERR(0, 406, __pyx_L3_error)
      }
    } else {
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case  8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
        CYTHON_FALLTHROUGH;
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        CYTHON_FALLTHROUGH;
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_wft = ((PyArrayObject *)values[0]);
    __pyx_v_dx0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_dx0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 406, __pyx_L3_error)
    __pyx_v_dx = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_dx == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 406, __pyx_L3_error)
    __pyx_v_z = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 407, __pyx_L3_error)
    __pyx_v_wl = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_wl == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 407, __pyx_L3_error)
    if (values[5]) {
      __pyx_v_axis = __Pyx_PyInt_As_int(values[5]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 407, __pyx_L3_error)
    } else {
      __pyx_v_axis = ((int)((int)-1));
    }
    __pyx_v_backend = ((PyObject*)values[6]);
    if (values[7]) {
      __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[7]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 408, __pyx_L3_error)
    } else {
      __pyx_v_num_threads = ((unsigned int)((unsigned int)1));
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("fraunhofer_wp", 0, 5, 8, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 406, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("pyrost.bin.simulation.fraunhofer_wp", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_wft), __pyx_ptype_5numpy_ndarray, 1, "wft", 0))) __PYX_ERR(0, 406, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_backend), (&PyUnicode_Type), 1, "backend", 1))) __PYX_ERR(0, 408, __pyx_L1_error)
  __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_10fraunhofer_wp(__pyx_self, __pyx_v_wft, __pyx_v_dx0, __pyx_v_dx, __pyx_v_z, __pyx_v_wl, __pyx_v_axis, __pyx_v_backend, __pyx_v_num_threads);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_10fraunhofer_wp(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_wft, double __pyx_v_dx0, double __pyx_v_dx, double __pyx_v_z, double __pyx_v_wl, int __pyx_v_axis, PyObject *__pyx_v_backend, unsigned int __pyx_v_num_threads) {
  npy_intp __pyx_v_isize;
  int __pyx_v_ndim;
  npy_intp __pyx_v_istride;
  npy_intp __pyx_v_npts;
  npy_intp *__pyx_v_dims;
  PyArrayObject *__pyx_v_out = 0;
  __pyx_t_double_complex *__pyx_v__out;
  __pyx_t_double_complex *__pyx_v__inp;
  int __pyx_v_fail;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("fraunhofer_wp", 0);
  __Pyx_INCREF((PyObject *)__pyx_v_wft);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("pyrost.bin.simulation.fraunhofer_wp", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_out);
  __Pyx_XDECREF((PyObject *)__pyx_v_wft);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__21 = PyTuple_Pack(17, __pyx_n_s_wft, __pyx_n_s_dx0, __pyx_n_s_dx, __pyx_n_s_z, __pyx_n_s_wl, __pyx_n_s_axis, __pyx_n_s_backend, __pyx_n_s_num_threads, __pyx_n_s_isize, __pyx_n_s_ndim, __pyx_n_s_istride, __pyx_n_s_npts, __pyx_n_s_dims, __pyx_n_s_out, __pyx_n_s_out_2, __pyx_n_s_inp_2, __pyx_n_s_fail); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__21);
  __Pyx_GIVEREF(__pyx_tuple__21);
/* … */
  __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4);
  __Pyx_INCREF(((PyObject*)__pyx_n_u_numpy));
  __Pyx_GIVEREF(((PyObject*)__pyx_n_u_numpy));
  PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject*)__pyx_n_u_numpy));
  __Pyx_GIVEREF(__pyx_t_5);
  PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
  __pyx_t_4 = 0;
  __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PyDict_NewPresized(9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_wft, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 406, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dx0, __pyx_n_u_double) < 0) __PYX_ERR(0, 406, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dx, __pyx_n_u_double) < 0) __PYX_ERR(0, 406, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_z, __pyx_n_u_double) < 0) __PYX_ERR(0, 406, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_wl, __pyx_n_u_double) < 0) __PYX_ERR(0, 406, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_axis, __pyx_n_u_int) < 0) __PYX_ERR(0, 406, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_backend, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 406, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 406, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 406, __pyx_L1_error)
  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_11fraunhofer_wp, 0, __pyx_n_s_fraunhofer_wp, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__22)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 406, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_4, __pyx_t_2);
  __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_4, __pyx_t_5);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_fraunhofer_wp, __pyx_t_4) < 0) __PYX_ERR(0, 406, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(8, 0, 17, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_fraunhofer_wp, 406, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 406, __pyx_L1_error)
+407:                   z: cython.double, wl: cython.double, axis: cython.int=-1,
  __pyx_t_4 = __Pyx_PyInt_From_int(((int)-1)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 407, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
+408:                   backend: str='numpy', num_threads: cython.uint=1) -> np.ndarray:
  __pyx_t_5 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 408, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
 409:     r"""Fraunhofer diffraction propagator. Propagates a wavefront `wft` by
 410:     a distance `z` downstream. You can choose between the 'fftw' and 'numpy'
 411:     backends for FFT calculations. The 'fftw' backend supports multithreading.
 412: 
 413:     Parameters
 414:     ----------
 415:     wft : numpy.ndarray
 416:         Initial wavefront.
 417:     dx0 : float
 418:         Sampling interval at the plane upstream [um].
 419:     dx : float
 420:         Sampling interval at the plane downstream [um].
 421:     z : float
 422:         Propagation distance [um].
 423:     wl : float
 424:         Incoming beam's wavelength [um].
 425:     axis : int, optional
 426:         Axis of `wft` array along which the calculation is
 427:         performed.
 428:     backend : {'fftw', 'numpy'}, optional
 429:         Choose backend library for the FFT implementation.
 430:     num_threads : int, optional
 431:         Number of threads used in the calculation. Only the 'fftw'
 432:         backend supports it.
 433: 
 434:     Returns
 435:     -------
 436:     out : numpy.ndarray
 437:         Propagated wavefront.
 438: 
 439:     Raises
 440:     ------
 441:     RuntimeError
 442:         If the 'numpy' backend exits with an error during the calculation.
 443:     ValueError
 444:         If `backend` option is invalid.
 445: 
 446:     Notes
 447:     -----
 448:     The Fraunhofer integral transform is defined as:
 449: 
 450:     .. math::
 451:         u^{\prime}(x^{\prime}) = \frac{e^{-j k z}}{j \sqrt{\lambda z}}
 452:         e^{-\frac{j k}{2 z} x^{\prime 2}} \int_{-\infty}^{+\infty} u(x)
 453:         e^{j\frac{2 \pi}{\lambda z} x x^{\prime}} dx
 454:     """
+455:     wft = np.PyArray_GETCONTIGUOUS(wft)
  __pyx_t_1 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_wft)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 455, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF_SET(__pyx_v_wft, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+456:     wft = np.PyArray_Cast(wft, np.NPY_COMPLEX128)
  __pyx_t_1 = PyArray_Cast(__pyx_v_wft, NPY_COMPLEX128); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 456, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 456, __pyx_L1_error)
  __Pyx_DECREF_SET(__pyx_v_wft, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
 457: 
+458:     cdef np.npy_intp isize = np.PyArray_SIZE(wft)
  __pyx_v_isize = PyArray_SIZE(__pyx_v_wft);
+459:     cdef int ndim = wft.ndim
  __pyx_t_2 = __pyx_v_wft->nd;
  __pyx_v_ndim = __pyx_t_2;
+460:     axis = axis if axis >= 0 else ndim + axis
  if (((__pyx_v_axis >= 0) != 0)) {
    __pyx_t_2 = __pyx_v_axis;
  } else {
    __pyx_t_2 = (__pyx_v_ndim + __pyx_v_axis);
  }
  __pyx_v_axis = __pyx_t_2;
+461:     axis = axis if axis <= ndim - 1 else ndim - 1
  if (((__pyx_v_axis <= (__pyx_v_ndim - 1)) != 0)) {
    __pyx_t_3 = __pyx_v_axis;
  } else {
    __pyx_t_3 = (__pyx_v_ndim - 1);
  }
  __pyx_v_axis = __pyx_t_3;
+462:     cdef np.npy_intp istride = np.PyArray_STRIDE(wft, axis) / np.PyArray_ITEMSIZE(wft)
  __pyx_v_istride = (PyArray_STRIDE(__pyx_v_wft, __pyx_v_axis) / PyArray_ITEMSIZE(__pyx_v_wft));
+463:     cdef np.npy_intp npts = np.PyArray_DIM(wft, axis)
  __pyx_v_npts = PyArray_DIM(__pyx_v_wft, __pyx_v_axis);
+464:     cdef np.npy_intp *dims = wft.shape
  __pyx_t_4 = __pyx_v_wft->dimensions;
  __pyx_v_dims = __pyx_t_4;
+465:     cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, np.NPY_COMPLEX128)
  __pyx_t_1 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, NPY_COMPLEX128); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 465, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __pyx_t_1;
  __Pyx_INCREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_v_out = ((PyArrayObject *)__pyx_t_5);
  __pyx_t_5 = 0;
+466:     cdef complex *_out = <complex *>np.PyArray_DATA(out)
  __pyx_v__out = ((__pyx_t_double_complex *)PyArray_DATA(__pyx_v_out));
+467:     cdef complex *_inp = <complex *>np.PyArray_DATA(wft)
  __pyx_v__inp = ((__pyx_t_double_complex *)PyArray_DATA(__pyx_v_wft));
+468:     cdef int fail = 0
  __pyx_v_fail = 0;
+469:     with nogil:
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {
/* … */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }
+470:         if backend == 'fftw':
        __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_fftw, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 470, __pyx_L4_error)
        __pyx_t_7 = (__pyx_t_6 != 0);
        if (__pyx_t_7) {
/* … */
          goto __pyx_L6;
        }
+471:             fraunhofer_fftw(_out, _inp, isize, npts, istride, dx0, dx, z, wl, num_threads)
          fraunhofer_fftw(__pyx_v__out, __pyx_v__inp, __pyx_v_isize, __pyx_v_npts, __pyx_v_istride, __pyx_v_dx0, __pyx_v_dx, __pyx_v_z, __pyx_v_wl, __pyx_v_num_threads);
+472:         elif backend == 'numpy':
        __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_numpy, Py_EQ)); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 472, __pyx_L4_error)
        __pyx_t_6 = (__pyx_t_7 != 0);
        if (__pyx_t_6) {
/* … */
          goto __pyx_L6;
        }
+473:             fail = fraunhofer_np(_out, _inp, isize, npts, istride, dx0, dx, z, wl, num_threads)
          __pyx_v_fail = fraunhofer_np(__pyx_v__out, __pyx_v__inp, __pyx_v_isize, __pyx_v_npts, __pyx_v_istride, __pyx_v_dx0, __pyx_v_dx, __pyx_v_z, __pyx_v_wl, __pyx_v_num_threads);
+474:             if fail:
          __pyx_t_6 = (__pyx_v_fail != 0);
          if (__pyx_t_6) {
/* … */
          }
+475:                 raise RuntimeError('NumPy FFT exited with error')
            {
                #ifdef WITH_THREAD
                PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
                #endif
                /*try:*/ {
                  __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 475, __pyx_L9_error)
                  __Pyx_GOTREF(__pyx_t_5);
                  __Pyx_Raise(__pyx_t_5, 0, 0, 0);
                  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
                  __PYX_ERR(0, 475, __pyx_L9_error)
                }
                /*finally:*/ {
                  __pyx_L9_error: {
                    #ifdef WITH_THREAD
                    __Pyx_PyGILState_Release(__pyx_gilstate_save);
                    #endif
                    goto __pyx_L4_error;
                  }
                }
            }
 476:         else:
+477:             raise ValueError('{:s} is invalid backend'.format(backend))
        /*else*/ {
          {
              #ifdef WITH_THREAD
              PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
              #endif
              /*try:*/ {
                __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_s_is_invalid_backend, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 477, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_1);
                __pyx_t_8 = NULL;
                if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
                  __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_1);
                  if (likely(__pyx_t_8)) {
                    PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
                    __Pyx_INCREF(__pyx_t_8);
                    __Pyx_INCREF(function);
                    __Pyx_DECREF_SET(__pyx_t_1, function);
                  }
                }
                __pyx_t_5 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_8, __pyx_v_backend) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_backend);
                __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
                if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 477, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_5);
                __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 477, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_1);
                __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
                __Pyx_Raise(__pyx_t_1, 0, 0, 0);
                __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                __PYX_ERR(0, 477, __pyx_L12_error)
              }
              /*finally:*/ {
                __pyx_L12_error: {
                  #ifdef WITH_THREAD
                  __Pyx_PyGILState_Release(__pyx_gilstate_save);
                  #endif
                  goto __pyx_L4_error;
                }
              }
          }
        }
        __pyx_L6:;
      }
+478:     return out
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_out));
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;
 479: 
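fraunhofer_wp shares rsc_wp's calling convention; a minimal, self-contained sketch (parameter values are again illustrative, and the 'fftw' backend assumes the extension was built with FFTW support):

    >>> import numpy as np
    >>> from pyrost.bin import simulation as sim
    >>> wft = np.ones(4096, dtype=np.complex128)          # flat illumination
    >>> out = sim.fraunhofer_wp(wft, dx0=0.05, dx=1.0, z=1e6, wl=7.3e-5,
    ...                         backend='fftw', num_threads=4)
    >>> out.dtype
    dtype('complex128')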
+480: def bar_positions(x0: cython.double, x1: cython.double, b_dx: cython.double,
/* Python wrapper */
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_13bar_positions(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6pyrost_3bin_10simulation_12bar_positions[] = "bar_positions(double x0: cython.double, double x1: cython.double, double b_dx: cython.double, double rd: cython.double, unsigned long seed: cython.ulong) -> np.ndarray\nGenerate a coordinate array of randomized barcode's bar positions.\n\n    Parameters\n    ----------\n    x0 : float\n        Barcode's lower bound along the x axis [um].\n    x1 : float\n        Barcode's upper bound along the x axis [um].\n    b_dx : float\n        Average bar's size [um].\n    rd : float\n        Random deviation of barcode's bar positions (0.0 - 1.0).\n    seed : int\n        Seed used for pseudo random number generation.\n\n    Returns\n    -------\n    bx_arr : numpy.ndarray\n        Array of barcode's bar coordinates.\n    ";
static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_13bar_positions = {"bar_positions", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_13bar_positions, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_12bar_positions};
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_13bar_positions(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  double __pyx_v_x0;
  double __pyx_v_x1;
  double __pyx_v_b_dx;
  double __pyx_v_rd;
  unsigned long __pyx_v_seed;
  PyArrayObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("bar_positions (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x0,&__pyx_n_s_x1,&__pyx_n_s_b_dx,&__pyx_n_s_rd,&__pyx_n_s_seed,0};
    PyObject* values[5] = {0,0,0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x0)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bar_positions", 1, 5, 5, 1); __PYX_ERR(0, 480, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_b_dx)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bar_positions", 1, 5, 5, 2); __PYX_ERR(0, 480, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rd)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bar_positions", 1, 5, 5, 3); __PYX_ERR(0, 480, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seed)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bar_positions", 1, 5, 5, 4); __PYX_ERR(0, 480, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bar_positions") < 0)) __PYX_ERR(0, 480, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 5) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
      values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
    }
    __pyx_v_x0 = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_x0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 480, __pyx_L3_error)
    __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 480, __pyx_L3_error)
    __pyx_v_b_dx = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_b_dx == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 480, __pyx_L3_error)
    __pyx_v_rd = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_rd == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 481, __pyx_L3_error)
    __pyx_v_seed = __Pyx_PyInt_As_unsigned_long(values[4]); if (unlikely((__pyx_v_seed == (unsigned long)-1) && PyErr_Occurred())) __PYX_ERR(0, 481, __pyx_L3_error)
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("bar_positions", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 480, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("pyrost.bin.simulation.bar_positions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_12bar_positions(__pyx_self, __pyx_v_x0, __pyx_v_x1, __pyx_v_b_dx, __pyx_v_rd, __pyx_v_seed);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_12bar_positions(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_x0, double __pyx_v_x1, double __pyx_v_b_dx, double __pyx_v_rd, unsigned long __pyx_v_seed) {
  npy_intp __pyx_v_size;
  npy_intp *__pyx_v_dims;
  PyArrayObject *__pyx_v_bars = 0;
  double *__pyx_v__bars;
  __Pyx_LocalBuf_ND __pyx_pybuffernd_bars;
  __Pyx_Buffer __pyx_pybuffer_bars;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("bar_positions", 0);
  __pyx_pybuffer_bars.pybuffer.buf = NULL;
  __pyx_pybuffer_bars.refcount = 0;
  __pyx_pybuffernd_bars.data = NULL;
  __pyx_pybuffernd_bars.rcbuffer = &__pyx_pybuffer_bars;
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_bars.rcbuffer->pybuffer);
  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
  __Pyx_AddTraceback("pyrost.bin.simulation.bar_positions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  goto __pyx_L2;
  __pyx_L0:;
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_bars.rcbuffer->pybuffer);
  __pyx_L2:;
  __Pyx_XDECREF((PyObject *)__pyx_v_bars);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__23 = PyTuple_Pack(9, __pyx_n_s_x0, __pyx_n_s_x1, __pyx_n_s_b_dx, __pyx_n_s_rd, __pyx_n_s_seed, __pyx_n_s_size, __pyx_n_s_dims, __pyx_n_s_bars, __pyx_n_s_bars_2); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 480, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__23);
  __Pyx_GIVEREF(__pyx_tuple__23);
/* … */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 480, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_x0, __pyx_n_u_double) < 0) __PYX_ERR(0, 480, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_x1, __pyx_n_u_double) < 0) __PYX_ERR(0, 480, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_b_dx, __pyx_n_u_double) < 0) __PYX_ERR(0, 480, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_rd, __pyx_n_u_double) < 0) __PYX_ERR(0, 480, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_seed, __pyx_kp_u_unsigned_long) < 0) __PYX_ERR(0, 480, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 480, __pyx_L1_error)
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_13bar_positions, 0, __pyx_n_s_bar_positions, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__24)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 480, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_5, __pyx_t_4);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_bar_positions, __pyx_t_5) < 0) __PYX_ERR(0, 480, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(5, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_bar_positions, 480, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 480, __pyx_L1_error)
 481:                   rd: cython.double, seed: cython.ulong) -> np.ndarray:
 482:     """Generate a coordinate array of randomized barcode's bar positions.
 483: 
 484:     Parameters
 485:     ----------
 486:     x0 : float
 487:         Barcode's lower bound along the x axis [um].
 488:     x1 : float
 489:         Barcode's upper bound along the x axis [um].
 490:     b_dx : float
 491:         Average bar size [um].
 492:     rd : float
 493:         Random deviation of barcode's bar positions (0.0 - 1.0).
 494:     seed : int
 495:         Seed used for pseudo random number generation.
 496: 
 497:     Returns
 498:     -------
 499:     bx_arr : numpy.ndarray
 500:         Array of barcode's bar coordinates.
 501:     """
+502:     cdef np.npy_intp size = 2 * (<np.npy_intp>((x1 - x0) / 2 / b_dx) + 1) if x1 > x0 else 0
  if (((__pyx_v_x1 > __pyx_v_x0) != 0)) {
    __pyx_t_1 = (2 * (((npy_intp)(((__pyx_v_x1 - __pyx_v_x0) / 2.0) / __pyx_v_b_dx)) + 1));
  } else {
    __pyx_t_1 = 0;
  }
  __pyx_v_size = __pyx_t_1;
+503:     cdef np.npy_intp *dims = [size,]
  __pyx_t_2[0] = __pyx_v_size;
  __pyx_v_dims = __pyx_t_2;
+504:     cdef np.ndarray[double] bars = <np.ndarray>np.PyArray_SimpleNew(1, dims, np.NPY_FLOAT64)
  __pyx_t_3 = PyArray_SimpleNew(1, __pyx_v_dims, NPY_FLOAT64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 504, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __pyx_t_3;
  __Pyx_INCREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_bars.rcbuffer->pybuffer, (PyObject*)((PyArrayObject *)__pyx_t_4), &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
      __pyx_v_bars = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_bars.rcbuffer->pybuffer.buf = NULL;
      __PYX_ERR(0, 504, __pyx_L1_error)
    } else {__pyx_pybuffernd_bars.diminfo[0].strides = __pyx_pybuffernd_bars.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_bars.diminfo[0].shape = __pyx_pybuffernd_bars.rcbuffer->pybuffer.shape[0];
    }
  }
  __pyx_v_bars = ((PyArrayObject *)__pyx_t_4);
  __pyx_t_4 = 0;
+505:     cdef double *_bars = <double *>np.PyArray_DATA(bars)
  __pyx_v__bars = ((double *)PyArray_DATA(((PyArrayObject *)__pyx_v_bars)));
+506:     if size:
  __pyx_t_5 = (__pyx_v_size != 0);
  if (__pyx_t_5) {
/* … */
  }
+507:         with nogil:
    {
        #ifdef WITH_THREAD
        PyThreadState *_save;
        Py_UNBLOCK_THREADS
        __Pyx_FastGIL_Remember();
        #endif
        /*try:*/ {
/* … */
        /*finally:*/ {
          /*normal exit:*/{
            #ifdef WITH_THREAD
            __Pyx_FastGIL_Forget();
            Py_BLOCK_THREADS
            #endif
            goto __pyx_L6;
          }
          __pyx_L6:;
        }
    }
+508:             barcode_bars(_bars, size, x0, b_dx, rd, seed)
          barcode_bars(__pyx_v__bars, __pyx_v_size, __pyx_v_x0, __pyx_v_b_dx, __pyx_v_rd, __pyx_v_seed);
        }
+509:     return bars
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_bars));
  __pyx_r = ((PyArrayObject *)__pyx_v_bars);
  goto __pyx_L0;
 510: 
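Per the size expression at line 502, bar_positions returns 2 * (int((x1 - x0) / (2 * b_dx)) + 1) coordinates when x1 > x0 and an empty array otherwise. A minimal sketch (import path assumed as above):

    >>> from pyrost.bin import simulation as sim
    >>> bars = sim.bar_positions(x0=0.0, x1=100.0, b_dx=5.0, rd=0.3, seed=1)
    >>> bars.shape, bars.dtype
    ((22,), dtype('float64'))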
+511: cdef np.ndarray ml_profile_wrapper(np.ndarray x_arr, np.ndarray layers, complex mt0,
static PyArrayObject *__pyx_f_6pyrost_3bin_10simulation_ml_profile_wrapper(PyArrayObject *__pyx_v_x_arr, PyArrayObject *__pyx_v_layers, __pyx_t_double_complex __pyx_v_mt0, __pyx_t_double_complex __pyx_v_mt1, __pyx_t_double_complex __pyx_v_mt2, double __pyx_v_sigma, unsigned int __pyx_v_num_threads) {
  int __pyx_v_indim;
  int __pyx_v_lndim;
  npy_intp __pyx_v_isize;
  npy_intp __pyx_v_lsize;
  npy_intp __pyx_v_nlyr;
  npy_intp *__pyx_v_dims;
  int __pyx_v_i;
  PyArrayObject *__pyx_v_out = 0;
  __pyx_t_double_complex *__pyx_v__out;
  double *__pyx_v__x;
  double *__pyx_v__lyrs;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("ml_profile_wrapper", 0);
  __Pyx_INCREF((PyObject *)__pyx_v_x_arr);
  __Pyx_INCREF((PyObject *)__pyx_v_layers);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("pyrost.bin.simulation.ml_profile_wrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_out);
  __Pyx_XDECREF((PyObject *)__pyx_v_x_arr);
  __Pyx_XDECREF((PyObject *)__pyx_v_layers);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
 512:                                    complex mt1, complex mt2, double sigma, unsigned num_threads):
+513:     x_arr = np.PyArray_GETCONTIGUOUS(x_arr)
  __pyx_t_1 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_x_arr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 513, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF_SET(__pyx_v_x_arr, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+514:     x_arr = np.PyArray_Cast(x_arr, np.NPY_FLOAT64)
  __pyx_t_1 = PyArray_Cast(__pyx_v_x_arr, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 514, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 514, __pyx_L1_error)
  __Pyx_DECREF_SET(__pyx_v_x_arr, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+515:     layers = np.PyArray_GETCONTIGUOUS(layers)
  __pyx_t_1 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_layers)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 515, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF_SET(__pyx_v_layers, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+516:     layers = np.PyArray_Cast(layers, np.NPY_FLOAT64)
  __pyx_t_1 = PyArray_Cast(__pyx_v_layers, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 516, __pyx_L1_error)
  __Pyx_DECREF_SET(__pyx_v_layers, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
 517: 
+518:     cdef int indim = x_arr.ndim
  __pyx_t_2 = __pyx_v_x_arr->nd;
  __pyx_v_indim = __pyx_t_2;
+519:     cdef int lndim = layers.ndim
  __pyx_t_2 = __pyx_v_layers->nd;
  __pyx_v_lndim = __pyx_t_2;
+520:     cdef np.npy_intp isize = np.PyArray_SIZE(x_arr)
  __pyx_v_isize = PyArray_SIZE(__pyx_v_x_arr);
+521:     cdef np.npy_intp lsize = np.PyArray_SIZE(layers)
  __pyx_v_lsize = PyArray_SIZE(__pyx_v_layers);
+522:     cdef np.npy_intp nlyr = layers.shape[lndim - 1]
  __pyx_v_nlyr = (__pyx_v_layers->dimensions[(__pyx_v_lndim - 1)]);
+523:     cdef np.npy_intp *dims = <np.npy_intp *>malloc((indim + lndim - 1) * sizeof(np.npy_intp))
  __pyx_v_dims = ((npy_intp *)malloc((((__pyx_v_indim + __pyx_v_lndim) - 1) * (sizeof(npy_intp)))));
+524:     if dims is NULL:
  __pyx_t_3 = ((__pyx_v_dims == NULL) != 0);
  if (__pyx_t_3) {
/* … */
  }
+525:         abort()
    abort();
 526:     cdef int i
+527:     for i in range(lndim - 1):
  __pyx_t_4 = (__pyx_v_lndim - 1);
  __pyx_t_5 = __pyx_t_4;
  for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_5; __pyx_t_2+=1) {
    __pyx_v_i = __pyx_t_2;
+528:         dims[i] = layers.shape[i]
    (__pyx_v_dims[__pyx_v_i]) = (__pyx_v_layers->dimensions[__pyx_v_i]);
  }
+529:     for i in range(indim):
  __pyx_t_2 = __pyx_v_indim;
  __pyx_t_6 = __pyx_t_2;
  for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) {
    __pyx_v_i = __pyx_t_7;
+530:         dims[i + lndim - 1] = x_arr.shape[i]
    (__pyx_v_dims[((__pyx_v_i + __pyx_v_lndim) - 1)]) = (__pyx_v_x_arr->dimensions[__pyx_v_i]);
  }
+531:     cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(indim + lndim - 1, dims, np.NPY_COMPLEX128)
  __pyx_t_1 = PyArray_SimpleNew(((__pyx_v_indim + __pyx_v_lndim) - 1), __pyx_v_dims, NPY_COMPLEX128); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 531, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_8 = __pyx_t_1;
  __Pyx_INCREF(__pyx_t_8);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_v_out = ((PyArrayObject *)__pyx_t_8);
  __pyx_t_8 = 0;
+532:     cdef complex *_out = <complex *>np.PyArray_DATA(out)
  __pyx_v__out = ((__pyx_t_double_complex *)PyArray_DATA(__pyx_v_out));
+533:     cdef double *_x = <double *>np.PyArray_DATA(x_arr)
  __pyx_v__x = ((double *)PyArray_DATA(__pyx_v_x_arr));
+534:     cdef double *_lyrs = <double *>np.PyArray_DATA(layers)
  __pyx_v__lyrs = ((double *)PyArray_DATA(__pyx_v_layers));
+535:     with nogil:
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {
/* … */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L10;
        }
        __pyx_L10:;
      }
  }
+536:         ml_profile(_out, _x, _lyrs, isize, lsize, nlyr, mt0, mt1, mt2, sigma, num_threads)
        ml_profile(__pyx_v__out, __pyx_v__x, __pyx_v__lyrs, __pyx_v_isize, __pyx_v_lsize, __pyx_v_nlyr, __pyx_v_mt0, __pyx_v_mt1, __pyx_v_mt2, __pyx_v_sigma, __pyx_v_num_threads);
      }
+537:     free(dims)
  free(__pyx_v_dims);
+538:     return out
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_out));
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;
 539: 
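For reference, the dims bookkeeping at lines 523-530 composes the output shape of ml_profile_wrapper from the leading axes of `layers` followed by all axes of `x_arr`. The function itself is cdef-only, but the resulting shape can be sketched at the NumPy level:

    >>> import numpy as np
    >>> x_arr = np.zeros((4, 1000))               # indim = 2
    >>> layers = np.zeros((3, 7))                 # lndim = 2, nlyr = 7
    >>> layers.shape[:-1] + x_arr.shape           # shape of the NPY_COMPLEX128 output
    (3, 4, 1000)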
+540: def barcode_profile(x_arr: np.ndarray, bars: np.ndarray, bulk_atn: cython.double,
/* Python wrapper */
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_15barcode_profile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6pyrost_3bin_10simulation_14barcode_profile[] = "barcode_profile(ndarray x_arr: np.ndarray, ndarray bars: np.ndarray, double bulk_atn: cython.double, double bar_atn: cython.double, double bar_sigma: cython.double, unsigned int num_threads: cython.uint) -> np.ndarray\nReturn an array of barcode's transmission profile calculated\n    at `x_arr` coordinates.\n\n    Parameters\n    ----------\n    x_arr : numpy.ndarray\n        Array of the coordinates, where the transmission coefficients\n        are calculated [um].    \n    bars : numpy.ndarray\n        Coordinates of barcode's bar positions [um].\n    bulk_atn : float\n        Barcode's bulk attenuation coefficient (0.0 - 1.0).\n    bar_atn : float\n        Barcode's bar attenuation coefficient (0.0 - 1.0).\n    bar_sigma : float\n        Bar's blurriness width [um].\n    num_threads : int, optional\n        Number of threads.\n    \n    Returns\n    -------\n    bar_profile : numpy.ndarray\n        Array of barcode's transmission profiles.\n\n    Notes\n    -----\n    Barcode's transmission profile is simulated with a set\n    of error functions:\n    \n    .. math::\n        \\begin{multline}\n            T_{b}(x) = 1 - \\frac{T_{bulk}}{2} \\left\\{\n            \\mathrm{erf}\\left[ \\frac{x - x_{bar}[0]}{\\sqrt{2} \\sigma} \\right] +\n            \\mathrm{erf}\\left[ \\frac{x_{bar}[n - 1] - x}{\\sqrt{2} \\sigma} \\right]\n            \\right\\} -\\\\\n            \\frac{T_{bar}}{4} \\sum_{i = 1}^{n - 2} \\left\\{\n            2 \\mathrm{erf}\\left[ \\frac{x - x_{bar}[i]}{\\sqrt{2} \\sigma} \\right] -\n            \\mathrm{erf}\\left[ \\frac{x - x_{bar}[i - 1]}{\\sqrt{2} \\sigma} \\right] -\n            \\mathrm{erf}\\left[ \\frac{x - x_{bar}[i + 1]}{\\sqrt{2} \\sigma} \\right]\n            \\right\\}\n        \\end{multline}\n    \n    where :math:`x_{bar}` is an array of bar coordinates.\n    ";
static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_15barcode_profile = {"barcode_profile", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_15barcode_profile, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_14barcode_profile};
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_15barcode_profile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_x_arr = 0;
  PyArrayObject *__pyx_v_bars = 0;
  double __pyx_v_bulk_atn;
  double __pyx_v_bar_atn;
  double __pyx_v_bar_sigma;
  unsigned int __pyx_v_num_threads;
  PyArrayObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("barcode_profile (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x_arr,&__pyx_n_s_bars,&__pyx_n_s_bulk_atn,&__pyx_n_s_bar_atn,&__pyx_n_s_bar_sigma,&__pyx_n_s_num_threads,0};
    PyObject* values[6] = {0,0,0,0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x_arr)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bars)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("barcode_profile", 1, 6, 6, 1); __PYX_ERR(0, 540, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bulk_atn)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("barcode_profile", 1, 6, 6, 2); __PYX_ERR(0, 540, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bar_atn)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("barcode_profile", 1, 6, 6, 3); __PYX_ERR(0, 540, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bar_sigma)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("barcode_profile", 1, 6, 6, 4); __PYX_ERR(0, 540, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  5:
        if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("barcode_profile", 1, 6, 6, 5); __PYX_ERR(0, 540, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "barcode_profile") < 0)) __PYX_ERR(0, 540, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 6) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
      values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
      values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
    }
    __pyx_v_x_arr = ((PyArrayObject *)values[0]);
    __pyx_v_bars = ((PyArrayObject *)values[1]);
    __pyx_v_bulk_atn = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_bulk_atn == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 540, __pyx_L3_error)
    __pyx_v_bar_atn = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_bar_atn == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 541, __pyx_L3_error)
    __pyx_v_bar_sigma = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_bar_sigma == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 541, __pyx_L3_error)
    __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[5]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 542, __pyx_L3_error)
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("barcode_profile", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 540, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("pyrost.bin.simulation.barcode_profile", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x_arr), __pyx_ptype_5numpy_ndarray, 1, "x_arr", 0))) __PYX_ERR(0, 540, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_bars), __pyx_ptype_5numpy_ndarray, 1, "bars", 0))) __PYX_ERR(0, 540, __pyx_L1_error)
  __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_14barcode_profile(__pyx_self, __pyx_v_x_arr, __pyx_v_bars, __pyx_v_bulk_atn, __pyx_v_bar_atn, __pyx_v_bar_sigma, __pyx_v_num_threads);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_14barcode_profile(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x_arr, PyArrayObject *__pyx_v_bars, double __pyx_v_bulk_atn, double __pyx_v_bar_atn, double __pyx_v_bar_sigma, unsigned int __pyx_v_num_threads) {
  __pyx_t_double_complex __pyx_v_mt0;
  __pyx_t_double_complex __pyx_v_mt1;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("barcode_profile", 0);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("pyrost.bin.simulation.barcode_profile", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__25 = PyTuple_Pack(8, __pyx_n_s_x_arr, __pyx_n_s_bars, __pyx_n_s_bulk_atn, __pyx_n_s_bar_atn, __pyx_n_s_bar_sigma, __pyx_n_s_num_threads, __pyx_n_s_mt0, __pyx_n_s_mt1); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(0, 540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__25);
  __Pyx_GIVEREF(__pyx_tuple__25);
/* … */
  __pyx_t_5 = __Pyx_PyDict_NewPresized(7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_x_arr, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 540, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_bars, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 540, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_bulk_atn, __pyx_n_u_double) < 0) __PYX_ERR(0, 540, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_bar_atn, __pyx_n_u_double) < 0) __PYX_ERR(0, 540, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_bar_sigma, __pyx_n_u_double) < 0) __PYX_ERR(0, 540, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 540, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 540, __pyx_L1_error)
  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_15barcode_profile, 0, __pyx_n_s_barcode_profile, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 540, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_4, __pyx_t_5);
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_barcode_profile, __pyx_t_4) < 0) __PYX_ERR(0, 540, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(6, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_barcode_profile, 540, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(0, 540, __pyx_L1_error)
 541:                     bar_atn: cython.double, bar_sigma: cython.double,
 542:                     num_threads: cython.uint) -> np.ndarray:
 543:     r"""Return an array of barcode's transmission profile calculated
 544:     at `x_arr` coordinates.
 545: 
 546:     Parameters
 547:     ----------
 548:     x_arr : numpy.ndarray
 549:         Array of coordinates at which the transmission coefficients
 550:         are calculated [um].
 551:     bars : numpy.ndarray
 552:         Coordinates of barcode's bar positions [um].
 553:     bulk_atn : float
 554:         Barcode's bulk attenuation coefficient (0.0 - 1.0).
 555:     bar_atn : float
 556:         Barcode's bar attenuation coefficient (0.0 - 1.0).
 557:     bar_sigma : float
 558:         Bar's edge blur width [um].
 559:     num_threads : int, optional
 560:         Number of threads.
 561:     
 562:     Returns
 563:     -------
 564:     bar_profile : numpy.ndarray
 565:         Array of barcode's transmission profiles.
 566: 
 567:     Notes
 568:     -----
 569:     Barcode's transmission profile is simulated with a set
 570:     of error functions:
 571:     
 572:     .. math::
 573:         \begin{multline}
 574:             T_{b}(x) = 1 - \frac{T_{bulk}}{2} \left\{
 575:             \mathrm{erf}\left[ \frac{x - x_{bar}[0]}{\sqrt{2} \sigma} \right] +
 576:             \mathrm{erf}\left[ \frac{x_{bar}[n - 1] - x}{\sqrt{2} \sigma} \right]
 577:             \right\} -\\
 578:             \frac{T_{bar}}{4} \sum_{i = 1}^{n - 2} \left\{
 579:             2 \mathrm{erf}\left[ \frac{x - x_{bar}[i]}{\sqrt{2} \sigma} \right] -
 580:             \mathrm{erf}\left[ \frac{x - x_{bar}[i - 1]}{\sqrt{2} \sigma} \right] -
 581:             \mathrm{erf}\left[ \frac{x - x_{bar}[i + 1]}{\sqrt{2} \sigma} \right]
 582:             \right\}
 583:         \end{multline}
 584:     
 585:     where :math:`x_{bar}` is an array of bar coordinates.
 586:     """
+587:     cdef complex mt0 = -1j * log(1 - bulk_atn)
  __pyx_v_mt0 = __Pyx_c_prod_double(__Pyx_c_neg_double(__pyx_t_double_complex_from_parts(0, 1.0)), __pyx_t_double_complex_from_parts(log((1.0 - __pyx_v_bulk_atn)), 0));
+588:     cdef complex mt1 = -1j * log(1 - bar_atn)
  __pyx_v_mt1 = __Pyx_c_prod_double(__Pyx_c_neg_double(__pyx_t_double_complex_from_parts(0, 1.0)), __pyx_t_double_complex_from_parts(log((1.0 - __pyx_v_bar_atn)), 0));
+589:     return ml_profile_wrapper(x_arr, bars, mt0, mt1, 0., bar_sigma, num_threads)
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_ml_profile_wrapper(__pyx_v_x_arr, __pyx_v_bars, __pyx_v_mt0, __pyx_v_mt1, __pyx_t_double_complex_from_parts(0., 0), __pyx_v_bar_sigma, __pyx_v_num_threads)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 589, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = ((PyArrayObject *)__pyx_t_1);
  __pyx_t_1 = 0;
  goto __pyx_L0;
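The :math:`T_{b}(x)` expression in the Notes above can be evaluated directly with NumPy and SciPy as a sanity check of the profile shape. The sketch below is a plain transcription of that formula, assuming :math:`T_{bulk}` and :math:`T_{bar}` correspond to `bulk_atn` and `bar_atn`; the helper name `barcode_profile_ref` is hypothetical and is not the compiled routine, which instead converts the attenuation values to complex exponents ``mt = -1j * log(1 - atn)`` (presumably so that the amplitude factor ``exp(1j * mt)`` equals ``1 - atn`` inside `ml_profile_wrapper`).

    import numpy as np
    from scipy.special import erf

    def barcode_profile_ref(x, bars, bulk_atn, bar_atn, bar_sigma):
        # t[j, i] = erf((x[j] - bars[i]) / (sqrt(2) * sigma))
        t = erf((x[:, None] - bars[None, :]) / (np.sqrt(2) * bar_sigma))
        # bulk term: the first and last bar coordinates bound the barcode
        prof = 1.0 - 0.5 * bulk_atn * (t[:, 0] - t[:, -1])
        # bar term: second difference of erf over the interior bar coordinates
        prof -= 0.25 * bar_atn * (2.0 * t[:, 1:-1] - t[:, :-2] - t[:, 2:]).sum(axis=1)
        return prof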
 590: 
+591: def mll_profile(x_arr: np.ndarray, layers: np.ndarray, complex mt0,
/* Python wrapper */
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_17mll_profile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6pyrost_3bin_10simulation_16mll_profile[] = "mll_profile(ndarray x_arr: np.ndarray, ndarray layers: np.ndarray, double complex mt0, double complex mt1, double sigma: cython.double, unsigned int num_threads: cython.uint) -> np.ndarray\nReturn an array of MLL's transmission profile calculated\n    at `x_arr` coordinates.\n\n    Parameters\n    ----------\n    x_arr : numpy.ndarray\n        Array of the coordinates, where the transmission coefficients\n        are calculated [um].    \n    layers : numpy.ndarray\n        Coordinates of MLL's layers positions [um].\n    mt0 : complex\n        Fresnel transmission coefficient for the first material of MLL's\n        bilayer.\n    mt1 : complex\n        Fresnel transmission coefficient for the first material of MLL's\n        bilayer.\n    sigma : float\n        Interdiffusion length [um].\n    num_threads : int, optional\n        Number of threads.\n    \n    Returns\n    -------\n    bar_profile : numpy.ndarray\n        Array of barcode's transmission profiles.\n\n    Notes\n    -----\n    MLL's transmission profile is simulated with a set\n    of error functions:\n    \n    .. math::\n        \\begin{multline}\n            T_{b}(x) = 1 - \\frac{T_{bulk}}{2} \\left\\{\n            \\mathrm{erf}\\left[ \\frac{x - x_{lyr}[0]}{\\sqrt{2} \\sigma} \\right] +\n            \\mathrm{erf}\\left[ \\frac{x_{lyr}[n - 1] - x}{\\sqrt{2} \\sigma} \\right]\n            \\right\\} -\\\\\n            \\frac{T_{bar}}{4} \\sum_{i = 1}^{n - 2} \\left\\{\n            2 \\mathrm{erf}\\left[ \\frac{x - x_{lyr}[i]}{\\sqrt{2} \\sigma} \\right] -\n            \\mathrm{erf}\\left[ \\frac{x - x_{lyr}[i - 1]}{\\sqrt{2} \\sigma} \\right] -\n            \\mathrm{erf}\\left[ \\frac{x - x_{lyr}[i + 1]}{\\sqrt{2} \\sigma} \\right]\n            \\right\\}\n        \\end{multline}\n    \n    where :math:`x_{lyr}` is an array of MLL's layer coordinates.\n    ";
static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_17mll_profile = {"mll_profile", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_17mll_profile, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_16mll_profile};
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_17mll_profile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_x_arr = 0;
  PyArrayObject *__pyx_v_layers = 0;
  __pyx_t_double_complex __pyx_v_mt0;
  __pyx_t_double_complex __pyx_v_mt1;
  double __pyx_v_sigma;
  unsigned int __pyx_v_num_threads;
  PyArrayObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("mll_profile (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x_arr,&__pyx_n_s_layers,&__pyx_n_s_mt0,&__pyx_n_s_mt1,&__pyx_n_s_sigma,&__pyx_n_s_num_threads,0};
    PyObject* values[6] = {0,0,0,0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x_arr)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_layers)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("mll_profile", 1, 6, 6, 1); __PYX_ERR(0, 591, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mt0)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("mll_profile", 1, 6, 6, 2); __PYX_ERR(0, 591, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mt1)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("mll_profile", 1, 6, 6, 3); __PYX_ERR(0, 591, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("mll_profile", 1, 6, 6, 4); __PYX_ERR(0, 591, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  5:
        if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("mll_profile", 1, 6, 6, 5); __PYX_ERR(0, 591, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mll_profile") < 0)) __PYX_ERR(0, 591, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 6) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
      values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
      values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
    }
    __pyx_v_x_arr = ((PyArrayObject *)values[0]);
    __pyx_v_layers = ((PyArrayObject *)values[1]);
    __pyx_v_mt0 = __Pyx_PyComplex_As___pyx_t_double_complex(values[2]); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 591, __pyx_L3_error)
    __pyx_v_mt1 = __Pyx_PyComplex_As___pyx_t_double_complex(values[3]); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 592, __pyx_L3_error)
    __pyx_v_sigma = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_sigma == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 592, __pyx_L3_error)
    __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[5]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 592, __pyx_L3_error)
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("mll_profile", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 591, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("pyrost.bin.simulation.mll_profile", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x_arr), __pyx_ptype_5numpy_ndarray, 1, "x_arr", 0))) __PYX_ERR(0, 591, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_layers), __pyx_ptype_5numpy_ndarray, 1, "layers", 0))) __PYX_ERR(0, 591, __pyx_L1_error)
  __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_16mll_profile(__pyx_self, __pyx_v_x_arr, __pyx_v_layers, __pyx_v_mt0, __pyx_v_mt1, __pyx_v_sigma, __pyx_v_num_threads);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_16mll_profile(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x_arr, PyArrayObject *__pyx_v_layers, __pyx_t_double_complex __pyx_v_mt0, __pyx_t_double_complex __pyx_v_mt1, double __pyx_v_sigma, unsigned int __pyx_v_num_threads) {
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("mll_profile", 0);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("pyrost.bin.simulation.mll_profile", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__27 = PyTuple_Pack(6, __pyx_n_s_x_arr, __pyx_n_s_layers, __pyx_n_s_mt0, __pyx_n_s_mt1, __pyx_n_s_sigma, __pyx_n_s_num_threads); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(0, 591, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__27);
  __Pyx_GIVEREF(__pyx_tuple__27);
/* … */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 591, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_x_arr, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 591, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_layers, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 591, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_sigma, __pyx_n_u_double) < 0) __PYX_ERR(0, 591, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 591, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 591, __pyx_L1_error)
  __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_17mll_profile, 0, __pyx_n_s_mll_profile, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__28)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 591, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_5, __pyx_t_4);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_mll_profile, __pyx_t_5) < 0) __PYX_ERR(0, 591, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(6, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_mll_profile, 591, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) __PYX_ERR(0, 591, __pyx_L1_error)
 592:                 complex mt1, sigma: cython.double, num_threads: cython.uint) -> np.ndarray:
 593:     r"""Return an array of MLL's transmission profile calculated
 594:     at `x_arr` coordinates.
 595: 
 596:     Parameters
 597:     ----------
 598:     x_arr : numpy.ndarray
 599:         Array of coordinates at which the transmission coefficients
 600:         are calculated [um].
 601:     layers : numpy.ndarray
 602:         Coordinates of MLL's layers positions [um].
 603:     mt0 : complex
 604:         Fresnel transmission coefficient for the first material of MLL's
 605:         bilayer.
 606:     mt1 : complex
 607:         Fresnel transmission coefficient for the second material of MLL's
 608:         bilayer.
 609:     sigma : float
 610:         Interdiffusion length [um].
 611:     num_threads : int, optional
 612:         Number of threads.
 613:     
 614:     Returns
 615:     -------
 616:     profile : numpy.ndarray
 617:         Array of MLL's transmission profiles.
 618: 
 619:     Notes
 620:     -----
 621:     MLL's transmission profile is simulated with a set
 622:     of error functions:
 623:     
 624:     .. math::
 625:         \begin{multline}
 626:             T_{b}(x) = 1 - \frac{T_{bulk}}{2} \left\{
 627:             \mathrm{erf}\left[ \frac{x - x_{lyr}[0]}{\sqrt{2} \sigma} \right] +
 628:             \mathrm{erf}\left[ \frac{x_{lyr}[n - 1] - x}{\sqrt{2} \sigma} \right]
 629:             \right\} -\\
 630:             \frac{T_{bar}}{4} \sum_{i = 1}^{n - 2} \left\{
 631:             2 \mathrm{erf}\left[ \frac{x - x_{lyr}[i]}{\sqrt{2} \sigma} \right] -
 632:             \mathrm{erf}\left[ \frac{x - x_{lyr}[i - 1]}{\sqrt{2} \sigma} \right] -
 633:             \mathrm{erf}\left[ \frac{x - x_{lyr}[i + 1]}{\sqrt{2} \sigma} \right]
 634:             \right\}
 635:         \end{multline}
 636:     
 637:     where :math:`x_{lyr}` is an array of MLL's layer coordinates.
 638:     """
+639:     return ml_profile_wrapper(x_arr, layers, 0., mt0, mt1, sigma, num_threads)
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_ml_profile_wrapper(__pyx_v_x_arr, __pyx_v_layers, __pyx_t_double_complex_from_parts(0., 0), __pyx_v_mt0, __pyx_v_mt1, __pyx_v_sigma, __pyx_v_num_threads)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 639, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = ((PyArrayObject *)__pyx_t_1);
  __pyx_t_1 = 0;
  goto __pyx_L0;
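A minimal usage sketch for the wrapper above, assuming the package layout from the tracebacks (`pyrost.bin.simulation`); the layer spacing and the complex transmission values are purely illustrative.

    import numpy as np
    from pyrost.bin.simulation import mll_profile

    # illustrative lens: 100 layers with a 20 nm period, sampled on a 1 nm grid
    layers = 0.02 * np.arange(1, 101)              # layer coordinates [um]
    x_arr = np.linspace(0.0, 2.2, 2201)            # sampling coordinates [um]
    mt0 = 0.99 - 0.01j                             # bilayer transmission coefficients
    mt1 = 0.95 - 0.03j                             # (illustrative values only)
    profile = mll_profile(x_arr, layers, mt0, mt1, sigma=0.005, num_threads=4)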
 640: 
+641: def fft_convolve(array: np.ndarray, kernel: np.ndarray, axis: cython.int=-1,
/* Python wrapper */
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_19fft_convolve(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6pyrost_3bin_10simulation_18fft_convolve[] = "fft_convolve(ndarray array: np.ndarray, ndarray kernel: np.ndarray, int axis: cython.int = -1, unicode mode: str = u'constant', double cval: cython.double = 0.0, unicode backend: str = u'numpy', unsigned int num_threads: cython.uint = 1) -> np.ndarray\nConvolve a multi-dimensional `array` with one-dimensional `kernel` along the\n    `axis` by means of FFT. Output has the same size as `array`.\n\n    Parameters\n    ----------\n    array : numpy.ndarray\n        Input array.\n    kernel : numpy.ndarray\n        Kernel array.\n    axis : int, optional\n        Array axis along which convolution is performed.\n    mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional\n        The mode parameter determines how the input array is extended when the filter\n        overlaps a border. Default value is 'constant'. The valid values and their behavior\n        is as follows:\n\n        * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all\n          values beyond the edge with the same constant value, defined by the `cval`\n          parameter.\n        * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating\n          the last pixel.\n        * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting\n          about the center of the last pixel. This mode is also sometimes referred to as\n          whole-sample symmetric.\n        * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting\n          about the edge of the last pixel. This mode is also sometimes referred to as\n          half-sample symmetric.\n        * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around\n          to the opposite edge.\n    cval : float, optional\n        Value to fill past edges of input if mode is \342\200\230constant\342\200\231. Default is 0.0.\n    backend : {'fftw', 'numpy'}, optional\n        Choose backend library for the FFT implementation.""\n    num_threads : int, optional\n        Number of threads.\n\n    Returns\n    -------\n    out : numpy.ndarray\n        A multi-dimensional array containing the discrete linear\n        convolution of `array` with `kernel`.\n    ";
static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_19fft_convolve = {"fft_convolve", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_19fft_convolve, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_18fft_convolve};
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_19fft_convolve(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_array = 0;
  PyArrayObject *__pyx_v_kernel = 0;
  int __pyx_v_axis;
  PyObject *__pyx_v_mode = 0;
  double __pyx_v_cval;
  PyObject *__pyx_v_backend = 0;
  unsigned int __pyx_v_num_threads;
  PyArrayObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("fft_convolve (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_array,&__pyx_n_s_kernel,&__pyx_n_s_axis,&__pyx_n_s_mode,&__pyx_n_s_cval,&__pyx_n_s_backend,&__pyx_n_s_num_threads,0};
    PyObject* values[7] = {0,0,0,0,0,0,0};
    values[3] = ((PyObject*)((PyObject*)__pyx_n_u_constant));
    values[5] = ((PyObject*)((PyObject*)__pyx_n_u_numpy));
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        CYTHON_FALLTHROUGH;
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_array)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_kernel)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("fft_convolve", 0, 2, 7, 1); __PYX_ERR(0, 641, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis);
          if (value) { values[2] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
          if (value) { values[3] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cval);
          if (value) { values[4] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  5:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_backend);
          if (value) { values[5] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  6:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads);
          if (value) { values[6] = value; kw_args--; }
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fft_convolve") < 0)) __PYX_ERR(0, 641, __pyx_L3_error)
      }
    } else {
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        CYTHON_FALLTHROUGH;
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_array = ((PyArrayObject *)values[0]);
    __pyx_v_kernel = ((PyArrayObject *)values[1]);
    if (values[2]) {
      __pyx_v_axis = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 641, __pyx_L3_error)
    } else {
      __pyx_v_axis = ((int)((int)-1));
    }
    __pyx_v_mode = ((PyObject*)values[3]);
    if (values[4]) {
      __pyx_v_cval = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_cval == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 642, __pyx_L3_error)
    } else {
      __pyx_v_cval = ((double)((double)0.0));
    }
    __pyx_v_backend = ((PyObject*)values[5]);
    if (values[6]) {
      __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[6]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 643, __pyx_L3_error)
    } else {
      __pyx_v_num_threads = ((unsigned int)((unsigned int)1));
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("fft_convolve", 0, 2, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 641, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("pyrost.bin.simulation.fft_convolve", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_array), __pyx_ptype_5numpy_ndarray, 1, "array", 0))) __PYX_ERR(0, 641, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_kernel), __pyx_ptype_5numpy_ndarray, 1, "kernel", 0))) __PYX_ERR(0, 641, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mode), (&PyUnicode_Type), 1, "mode", 1))) __PYX_ERR(0, 642, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_backend), (&PyUnicode_Type), 1, "backend", 1))) __PYX_ERR(0, 642, __pyx_L1_error)
  __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_18fft_convolve(__pyx_self, __pyx_v_array, __pyx_v_kernel, __pyx_v_axis, __pyx_v_mode, __pyx_v_cval, __pyx_v_backend, __pyx_v_num_threads);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_18fft_convolve(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_array, PyArrayObject *__pyx_v_kernel, int __pyx_v_axis, PyObject *__pyx_v_mode, double __pyx_v_cval, PyObject *__pyx_v_backend, unsigned int __pyx_v_num_threads) {
  npy_intp __pyx_v_isize;
  int __pyx_v_ndim;
  npy_intp __pyx_v_npts;
  npy_intp __pyx_v_istride;
  npy_intp __pyx_v_ksize;
  int __pyx_v__mode;
  npy_intp *__pyx_v_dims;
  PyArrayObject *__pyx_v_out = 0;
  double *__pyx_v__out;
  double *__pyx_v__inp;
  double *__pyx_v__krn;
  int __pyx_v_fail;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("fft_convolve", 0);
  __Pyx_INCREF((PyObject *)__pyx_v_array);
  __Pyx_INCREF((PyObject *)__pyx_v_kernel);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("pyrost.bin.simulation.fft_convolve", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_out);
  __Pyx_XDECREF((PyObject *)__pyx_v_array);
  __Pyx_XDECREF((PyObject *)__pyx_v_kernel);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__29 = PyTuple_Pack(19, __pyx_n_s_array, __pyx_n_s_kernel, __pyx_n_s_axis, __pyx_n_s_mode, __pyx_n_s_cval, __pyx_n_s_backend, __pyx_n_s_num_threads, __pyx_n_s_isize, __pyx_n_s_ndim, __pyx_n_s_npts, __pyx_n_s_istride, __pyx_n_s_ksize, __pyx_n_s_mode_2, __pyx_n_s_dims, __pyx_n_s_out, __pyx_n_s_out_2, __pyx_n_s_inp_2, __pyx_n_s_krn, __pyx_n_s_fail); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(0, 641, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__29);
  __Pyx_GIVEREF(__pyx_tuple__29);
/* … */
  __pyx_t_5 = __Pyx_PyInt_From_int(((int)-1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 641, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
/* … */
  __pyx_t_1 = PyTuple_New(5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 641, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_5);
  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
  __Pyx_INCREF(((PyObject*)__pyx_n_u_constant));
  __Pyx_GIVEREF(((PyObject*)__pyx_n_u_constant));
  PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject*)__pyx_n_u_constant));
  __Pyx_GIVEREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_4);
  __Pyx_INCREF(((PyObject*)__pyx_n_u_numpy));
  __Pyx_GIVEREF(((PyObject*)__pyx_n_u_numpy));
  PyTuple_SET_ITEM(__pyx_t_1, 3, ((PyObject*)__pyx_n_u_numpy));
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_1, 4, __pyx_t_2);
  __pyx_t_5 = 0;
  __pyx_t_4 = 0;
  __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_PyDict_NewPresized(8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 641, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_array, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 641, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_kernel, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 641, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_axis, __pyx_n_u_int) < 0) __PYX_ERR(0, 641, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_mode, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 641, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_cval, __pyx_n_u_double) < 0) __PYX_ERR(0, 641, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_backend, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 641, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 641, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 641, __pyx_L1_error)
  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_19fft_convolve, 0, __pyx_n_s_fft_convolve, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__30)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 641, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_4, __pyx_t_1);
  __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_4, __pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_fft_convolve, __pyx_t_4) < 0) __PYX_ERR(0, 641, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_codeobj__30 = (PyObject*)__Pyx_PyCode_New(7, 0, 19, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_fft_convolve, 641, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__30)) __PYX_ERR(0, 641, __pyx_L1_error)
+642:                  mode: str='constant', cval: cython.double=0.0, backend: str='numpy',
  __pyx_t_4 = PyFloat_FromDouble(((double)0.0)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 642, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
+643:                  num_threads: cython.uint=1) -> np.ndarray:
  __pyx_t_2 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 643, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
 644:     """Convolve a multi-dimensional `array` with one-dimensional `kernel` along the
 645:     `axis` by means of FFT. Output has the same size as `array`.
 646: 
 647:     Parameters
 648:     ----------
 649:     array : numpy.ndarray
 650:         Input array.
 651:     kernel : numpy.ndarray
 652:         Kernel array.
 653:     axis : int, optional
 654:         Array axis along which convolution is performed.
 655:     mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional
 656:         The mode parameter determines how the input array is extended when the filter
 657:         overlaps a border. Default value is 'constant'. The valid values and their behaviors
 658:         are as follows:
 659: 
 660:         * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all
 661:           values beyond the edge with the same constant value, defined by the `cval`
 662:           parameter.
 663:         * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating
 664:           the last pixel.
 665:         * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting
 666:           about the center of the last pixel. This mode is also sometimes referred to as
 667:           whole-sample symmetric.
 668:         * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting
 669:           about the edge of the last pixel. This mode is also sometimes referred to as
 670:           half-sample symmetric.
 671:         * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around
 672:           to the opposite edge.
 673:     cval : float, optional
 674:         Value to fill past edges of input if mode is ‘constant’. Default is 0.0.
 675:     backend : {'fftw', 'numpy'}, optional
 676:         Choose backend library for the FFT implementation.
 677:     num_threads : int, optional
 678:         Number of threads.
 679: 
 680:     Returns
 681:     -------
 682:     out : numpy.ndarray
 683:         A multi-dimensional array containing the discrete linear
 684:         convolution of `array` with `kernel`.
 685:     """
+686:     array = np.PyArray_GETCONTIGUOUS(array)
  __pyx_t_1 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_array)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 686, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF_SET(__pyx_v_array, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+687:     array = np.PyArray_Cast(array, np.NPY_FLOAT64)
  __pyx_t_1 = PyArray_Cast(__pyx_v_array, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 687, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 687, __pyx_L1_error)
  __Pyx_DECREF_SET(__pyx_v_array, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+688:     kernel = np.PyArray_GETCONTIGUOUS(kernel)
  __pyx_t_1 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_kernel)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 688, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF_SET(__pyx_v_kernel, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+689:     kernel = np.PyArray_Cast(kernel, np.NPY_FLOAT64)
  __pyx_t_1 = PyArray_Cast(__pyx_v_kernel, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 689, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 689, __pyx_L1_error)
  __Pyx_DECREF_SET(__pyx_v_kernel, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
 690: 
+691:     cdef np.npy_intp isize = np.PyArray_SIZE(array)
  __pyx_v_isize = PyArray_SIZE(__pyx_v_array);
+692:     cdef int ndim = array.ndim
  __pyx_t_2 = __pyx_v_array->nd;
  __pyx_v_ndim = __pyx_t_2;
+693:     axis = axis if axis >= 0 else ndim + axis
  if (((__pyx_v_axis >= 0) != 0)) {
    __pyx_t_2 = __pyx_v_axis;
  } else {
    __pyx_t_2 = (__pyx_v_ndim + __pyx_v_axis);
  }
  __pyx_v_axis = __pyx_t_2;
+694:     axis = axis if axis <= ndim - 1 else ndim - 1
  if (((__pyx_v_axis <= (__pyx_v_ndim - 1)) != 0)) {
    __pyx_t_3 = __pyx_v_axis;
  } else {
    __pyx_t_3 = (__pyx_v_ndim - 1);
  }
  __pyx_v_axis = __pyx_t_3;
+695:     cdef np.npy_intp npts = np.PyArray_DIM(array, axis)
  __pyx_v_npts = PyArray_DIM(__pyx_v_array, __pyx_v_axis);
+696:     cdef np.npy_intp istride = np.PyArray_STRIDE(array, axis) / np.PyArray_ITEMSIZE(array)
  __pyx_v_istride = (PyArray_STRIDE(__pyx_v_array, __pyx_v_axis) / PyArray_ITEMSIZE(__pyx_v_array));
+697:     cdef np.npy_intp ksize = np.PyArray_DIM(kernel, 0)
  __pyx_v_ksize = PyArray_DIM(__pyx_v_kernel, 0);
+698:     cdef int _mode = extend_mode_to_code(mode)
  __pyx_t_2 = __pyx_f_6pyrost_3bin_10simulation_extend_mode_to_code(__pyx_v_mode); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 698, __pyx_L1_error)
  __pyx_v__mode = __pyx_t_2;
+699:     cdef np.npy_intp *dims = array.shape
  __pyx_t_4 = __pyx_v_array->dimensions;
  __pyx_v_dims = __pyx_t_4;
+700:     cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, np.NPY_FLOAT64)
  __pyx_t_1 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 700, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __pyx_t_1;
  __Pyx_INCREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_v_out = ((PyArrayObject *)__pyx_t_5);
  __pyx_t_5 = 0;
+701:     cdef double *_out = <double *>np.PyArray_DATA(out)
  __pyx_v__out = ((double *)PyArray_DATA(__pyx_v_out));
+702:     cdef double *_inp = <double *>np.PyArray_DATA(array)
  __pyx_v__inp = ((double *)PyArray_DATA(__pyx_v_array));
+703:     cdef double *_krn = <double *>np.PyArray_DATA(kernel)
  __pyx_v__krn = ((double *)PyArray_DATA(__pyx_v_kernel));
+704:     cdef int fail = 0
  __pyx_v_fail = 0;
+705:     with nogil:
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {
/* … */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }
+706:         if backend == 'fftw':
        __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_fftw, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 706, __pyx_L4_error)
        __pyx_t_7 = (__pyx_t_6 != 0);
        if (__pyx_t_7) {
/* … */
          goto __pyx_L6;
        }
+707:             fft_convolve_fftw(_out, _inp, _krn, isize, npts, istride, ksize, _mode, cval, num_threads)
          fft_convolve_fftw(__pyx_v__out, __pyx_v__inp, __pyx_v__krn, __pyx_v_isize, __pyx_v_npts, __pyx_v_istride, __pyx_v_ksize, __pyx_v__mode, __pyx_v_cval, __pyx_v_num_threads);
+708:         elif backend == 'numpy':
        __pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_numpy, Py_EQ)); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 708, __pyx_L4_error)
        __pyx_t_6 = (__pyx_t_7 != 0);
        if (__pyx_t_6) {
/* … */
          goto __pyx_L6;
        }
+709:             fail = fft_convolve_np(_out, _inp, _krn, isize, npts, istride, ksize, _mode, cval, num_threads)
          __pyx_v_fail = fft_convolve_np(__pyx_v__out, __pyx_v__inp, __pyx_v__krn, __pyx_v_isize, __pyx_v_npts, __pyx_v_istride, __pyx_v_ksize, __pyx_v__mode, __pyx_v_cval, __pyx_v_num_threads);
+710:             if fail:
          __pyx_t_6 = (__pyx_v_fail != 0);
          if (__pyx_t_6) {
/* … */
          }
+711:                 raise RuntimeError('NumPy FFT exited with error')
            {
                #ifdef WITH_THREAD
                PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
                #endif
                /*try:*/ {
                  __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 711, __pyx_L9_error)
                  __Pyx_GOTREF(__pyx_t_5);
                  __Pyx_Raise(__pyx_t_5, 0, 0, 0);
                  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
                  __PYX_ERR(0, 711, __pyx_L9_error)
                }
                /*finally:*/ {
                  __pyx_L9_error: {
                    #ifdef WITH_THREAD
                    __Pyx_PyGILState_Release(__pyx_gilstate_save);
                    #endif
                    goto __pyx_L4_error;
                  }
                }
            }
 712:         else:
+713:             raise ValueError('{:s} is invalid backend'.format(backend))
        /*else*/ {
          {
              #ifdef WITH_THREAD
              PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
              #endif
              /*try:*/ {
                __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_s_is_invalid_backend, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 713, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_1);
                __pyx_t_8 = NULL;
                if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
                  __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_1);
                  if (likely(__pyx_t_8)) {
                    PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
                    __Pyx_INCREF(__pyx_t_8);
                    __Pyx_INCREF(function);
                    __Pyx_DECREF_SET(__pyx_t_1, function);
                  }
                }
                __pyx_t_5 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_8, __pyx_v_backend) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_backend);
                __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
                if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 713, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_5);
                __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 713, __pyx_L12_error)
                __Pyx_GOTREF(__pyx_t_1);
                __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
                __Pyx_Raise(__pyx_t_1, 0, 0, 0);
                __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                __PYX_ERR(0, 713, __pyx_L12_error)
              }
              /*finally:*/ {
                __pyx_L12_error: {
                  #ifdef WITH_THREAD
                  __Pyx_PyGILState_Release(__pyx_gilstate_save);
                  #endif
                  goto __pyx_L4_error;
                }
              }
          }
        }
        __pyx_L6:;
      }
+714:     return out
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_out));
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;
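A short usage sketch for `fft_convolve`, again using the module path from the tracebacks; the array sizes and the Gaussian kernel are illustrative.

    import numpy as np
    from pyrost.bin.simulation import fft_convolve

    data = np.random.default_rng(0).normal(size=(64, 1024))   # illustrative 2D input
    x = np.arange(-25.0, 26.0)
    kernel = np.exp(-0.5 * (x / 5.0)**2)
    kernel /= kernel.sum()                                     # normalized Gaussian kernel
    # smooth each row along the last axis, mirroring the input at the borders
    out = fft_convolve(data, kernel, axis=-1, mode='mirror',
                       backend='numpy', num_threads=4)
    assert out.shape == data.shape                             # output keeps the input size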
 715: 
+716: def make_frames(pfx: np.ndarray, pfy: np.ndarray, dx: cython.double, dy: cython.double,
/* Python wrapper */
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_21make_frames(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6pyrost_3bin_10simulation_20make_frames[] = "make_frames(ndarray pfx: np.ndarray, ndarray pfy: np.ndarray, double dx: cython.double, double dy: cython.double, tuple shape: tuple, long seed: cython.long, unsigned int num_threads: cython.uint) -> np.ndarray\nGenerate intensity frames from one-dimensional intensity profiles (`pfx`,\n    `pfy`) and whitefield profiles (`wfx`, `wfy`). Intensity profiles resized into\n    the shape of a frame. Poisson noise is applied if `seed` is non-negative.\n\n    Parameters\n    ----------\n    pfx : numpy.ndarray\n        Intensity profile along the x axis.\n    pfy : numpy.ndarray\n        Intensity profile along the y axis.\n    dx : float\n        Sampling interval along the x axis [um].\n    dy : float\n        Sampling interval along the y axis [um].\n    shape : tuple\n        Shape of the detector array.\n    seed : int, optional\n        Seed for pseudo-random number generation.\n    num_threads : int, optional\n        Number of threads.\n\n    Returns\n    -------\n    frames : numpy.ndarray\n        Intensity frames.\n    ";
static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_21make_frames = {"make_frames", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_21make_frames, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_20make_frames};
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_21make_frames(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_pfx = 0;
  PyArrayObject *__pyx_v_pfy = 0;
  double __pyx_v_dx;
  double __pyx_v_dy;
  PyObject *__pyx_v_shape = 0;
  long __pyx_v_seed;
  unsigned int __pyx_v_num_threads;
  PyArrayObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("make_frames (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pfx,&__pyx_n_s_pfy,&__pyx_n_s_dx,&__pyx_n_s_dy,&__pyx_n_s_shape,&__pyx_n_s_seed,&__pyx_n_s_num_threads,0};
    PyObject* values[7] = {0,0,0,0,0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        CYTHON_FALLTHROUGH;
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        CYTHON_FALLTHROUGH;
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pfx)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pfy)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, 1); __PYX_ERR(0, 716, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dx)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, 2); __PYX_ERR(0, 716, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dy)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, 3); __PYX_ERR(0, 716, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, 4); __PYX_ERR(0, 716, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  5:
        if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seed)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, 5); __PYX_ERR(0, 716, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  6:
        if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, 6); __PYX_ERR(0, 716, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "make_frames") < 0)) __PYX_ERR(0, 716, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 7) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
      values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
      values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
      values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
    }
    __pyx_v_pfx = ((PyArrayObject *)values[0]);
    __pyx_v_pfy = ((PyArrayObject *)values[1]);
    __pyx_v_dx = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_dx == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 716, __pyx_L3_error)
    __pyx_v_dy = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_dy == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 716, __pyx_L3_error)
    __pyx_v_shape = ((PyObject*)values[4]);
    __pyx_v_seed = __Pyx_PyInt_As_long(values[5]); if (unlikely((__pyx_v_seed == (long)-1) && PyErr_Occurred())) __PYX_ERR(0, 717, __pyx_L3_error)
    __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[6]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 717, __pyx_L3_error)
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 716, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("pyrost.bin.simulation.make_frames", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pfx), __pyx_ptype_5numpy_ndarray, 1, "pfx", 0))) __PYX_ERR(0, 716, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pfy), __pyx_ptype_5numpy_ndarray, 1, "pfy", 0))) __PYX_ERR(0, 716, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(0, 717, __pyx_L1_error)
  __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_20make_frames(__pyx_self, __pyx_v_pfx, __pyx_v_pfy, __pyx_v_dx, __pyx_v_dy, __pyx_v_shape, __pyx_v_seed, __pyx_v_num_threads);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_20make_frames(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_pfx, PyArrayObject *__pyx_v_pfy, double __pyx_v_dx, double __pyx_v_dy, PyObject *__pyx_v_shape, long __pyx_v_seed, unsigned int __pyx_v_num_threads) {
  npy_intp *__pyx_v_oshape;
  PyArrayObject *__pyx_v_out = 0;
  unsigned long *__pyx_v__ishape;
  unsigned long *__pyx_v__oshape;
  double *__pyx_v__out;
  double *__pyx_v__pfx;
  double *__pyx_v__pfy;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("make_frames", 0);
  __Pyx_INCREF((PyObject *)__pyx_v_pfx);
  __Pyx_INCREF((PyObject *)__pyx_v_pfy);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("pyrost.bin.simulation.make_frames", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_out);
  __Pyx_XDECREF((PyObject *)__pyx_v_pfx);
  __Pyx_XDECREF((PyObject *)__pyx_v_pfy);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__31 = PyTuple_Pack(14, __pyx_n_s_pfx, __pyx_n_s_pfy, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_shape, __pyx_n_s_seed, __pyx_n_s_num_threads, __pyx_n_s_oshape, __pyx_n_s_out, __pyx_n_s_ishape, __pyx_n_s_oshape_2, __pyx_n_s_out_2, __pyx_n_s_pfx_2, __pyx_n_s_pfy_2); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(0, 716, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__31);
  __Pyx_GIVEREF(__pyx_tuple__31);
/* … */
  __pyx_t_4 = __Pyx_PyDict_NewPresized(8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 716, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_pfx, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 716, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_pfy, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 716, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dx, __pyx_n_u_double) < 0) __PYX_ERR(0, 716, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dy, __pyx_n_u_double) < 0) __PYX_ERR(0, 716, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_shape, __pyx_n_u_tuple) < 0) __PYX_ERR(0, 716, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_seed, __pyx_n_u_long) < 0) __PYX_ERR(0, 716, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 716, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 716, __pyx_L1_error)
  __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_21make_frames, 0, __pyx_n_s_make_frames, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__32)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 716, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_2, __pyx_t_4);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_make_frames, __pyx_t_2) < 0) __PYX_ERR(0, 716, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_codeobj__32 = (PyObject*)__Pyx_PyCode_New(7, 0, 14, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_make_frames, 716, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__32)) __PYX_ERR(0, 716, __pyx_L1_error)
 717:                 shape: tuple, seed: cython.long, num_threads: cython.uint) -> np.ndarray:
 718:     """Generate intensity frames from one-dimensional intensity profiles (`pfx`,
 719:     `pfy`) and whitefield profiles (`wfx`, `wfy`). Intensity profiles are resized into
 720:     the shape of a frame. Poisson noise is applied if `seed` is non-negative.
 721: 
 722:     Parameters
 723:     ----------
 724:     pfx : numpy.ndarray
 725:         Intensity profile along the x axis.
 726:     pfy : numpy.ndarray
 727:         Intensity profile along the y axis.
 728:     dx : float
 729:         Sampling interval along the x axis [um].
 730:     dy : float
 731:         Sampling interval along the y axis [um].
 732:     shape : tuple
 733:         Shape of the detector array.
 734:     seed : int, optional
 735:         Seed for pseudo-random number generation.
 736:     num_threads : int, optional
 737:         Number of threads.
 738: 
 739:     Returns
 740:     -------
 741:     frames : numpy.ndarray
 742:         Intensity frames.
 743:     """
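A minimal usage sketch for `make_frames`, with array shapes suggested by the generated code below (`pfx` holds one profile per frame, `pfy` a single profile, and the output shape is `(pfx.shape[0], shape[0], shape[1])`); the sizes and sampling intervals are illustrative.

    import numpy as np
    from pyrost.bin.simulation import make_frames

    pfx = np.random.default_rng(0).uniform(1.0, 2.0, size=(10, 2000))  # per-frame x profiles
    pfy = np.ones(1000)                                                # common y profile
    frames = make_frames(pfx, pfy, dx=1.0, dy=1.0, shape=(256, 512),
                         seed=0, num_threads=4)
    # frames.shape == (10, 256, 512); a non-negative seed applies Poisson noise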
+744:     pfx = np.PyArray_GETCONTIGUOUS(pfx)
  __pyx_t_1 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_pfx)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 744, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF_SET(__pyx_v_pfx, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+745:     pfx = np.PyArray_Cast(pfx, np.NPY_FLOAT64)
  __pyx_t_1 = PyArray_Cast(__pyx_v_pfx, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 745, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 745, __pyx_L1_error)
  __Pyx_DECREF_SET(__pyx_v_pfx, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+746:     pfy = np.PyArray_GETCONTIGUOUS(pfy)
  __pyx_t_1 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_pfy)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 746, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF_SET(__pyx_v_pfy, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+747:     pfy = np.PyArray_Cast(pfy, np.NPY_FLOAT64)
  __pyx_t_1 = PyArray_Cast(__pyx_v_pfy, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 747, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 747, __pyx_L1_error)
  __Pyx_DECREF_SET(__pyx_v_pfy, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
 748: 
+749:     cdef np.npy_intp *oshape = [pfx.shape[0], <np.npy_intp>(shape[0]), <np.npy_intp>(shape[1])]
  if (unlikely(__pyx_v_shape == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
    __PYX_ERR(0, 749, __pyx_L1_error)
  }
  __pyx_t_2 = __Pyx_PyInt_As_Py_intptr_t(PyTuple_GET_ITEM(__pyx_v_shape, 0)); if (unlikely((__pyx_t_2 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 749, __pyx_L1_error)
  if (unlikely(__pyx_v_shape == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
    __PYX_ERR(0, 749, __pyx_L1_error)
  }
  __pyx_t_3 = __Pyx_PyInt_As_Py_intptr_t(PyTuple_GET_ITEM(__pyx_v_shape, 1)); if (unlikely((__pyx_t_3 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 749, __pyx_L1_error)
  __pyx_t_4[0] = (__pyx_v_pfx->dimensions[0]);
  __pyx_t_4[1] = ((npy_intp)__pyx_t_2);
  __pyx_t_4[2] = ((npy_intp)__pyx_t_3);
  __pyx_v_oshape = __pyx_t_4;
+750:     cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(3, oshape, np.NPY_FLOAT64)
  __pyx_t_1 = PyArray_SimpleNew(3, __pyx_v_oshape, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 750, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __pyx_t_1;
  __Pyx_INCREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_v_out = ((PyArrayObject *)__pyx_t_5);
  __pyx_t_5 = 0;
+751:     cdef unsigned long *_ishape = [<unsigned long>(pfx.shape[0]), <unsigned long>(pfy.shape[0]),
  __pyx_t_6[0] = ((unsigned long)(__pyx_v_pfx->dimensions[0]));
  __pyx_t_6[1] = ((unsigned long)(__pyx_v_pfy->dimensions[0]));
  __pyx_t_6[2] = ((unsigned long)(__pyx_v_pfx->dimensions[1]));
  __pyx_v__ishape = __pyx_t_6;
 752:                                    <unsigned long>(pfx.shape[1])]
+753:     cdef unsigned long *_oshape = [<unsigned long>(oshape[0]), <unsigned long>(oshape[1]), <unsigned long>(oshape[2])]
  __pyx_t_7[0] = ((unsigned long)(__pyx_v_oshape[0]));
  __pyx_t_7[1] = ((unsigned long)(__pyx_v_oshape[1]));
  __pyx_t_7[2] = ((unsigned long)(__pyx_v_oshape[2]));
  __pyx_v__oshape = __pyx_t_7;
+754:     cdef double *_out = <double *>np.PyArray_DATA(out)
  __pyx_v__out = ((double *)PyArray_DATA(__pyx_v_out));
+755:     cdef double *_pfx = <double *>np.PyArray_DATA(pfx)
  __pyx_v__pfx = ((double *)PyArray_DATA(__pyx_v_pfx));
+756:     cdef double *_pfy = <double *>np.PyArray_DATA(pfy)
  __pyx_v__pfy = ((double *)PyArray_DATA(__pyx_v_pfy));
+757:     with nogil:
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {
/* … */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L5:;
      }
  }
+758:         frames(_out, _pfx, _pfy, dx, dy, _ishape, _oshape, seed, num_threads)
        frames(__pyx_v__out, __pyx_v__pfx, __pyx_v__pfy, __pyx_v_dx, __pyx_v_dy, __pyx_v__ishape, __pyx_v__oshape, __pyx_v_seed, __pyx_v_num_threads);
      }
+759:     return out
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_out));
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;
 760: 
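A minimal usage sketch for make_frames (not part of the generated output): the import path follows the module name pyrost.bin.simulation visible in the C code above, while the array sizes, sampling intervals and thread count are illustrative assumptions only.

    import numpy as np
    from pyrost.bin.simulation import make_frames

    # Hypothetical profiles: 10 frames, 2000 samples along x per frame, 1500 along y.
    pfx = np.random.rand(10, 2000)   # 2D profile, frames stacked along axis 0
    pfy = np.random.rand(1500)       # 1D profile along the y axis

    # dx, dy are sampling intervals in um; a non-negative seed enables Poisson noise.
    frames = make_frames(pfx, pfy, dx=1.0, dy=1.0, shape=(512, 512),
                         seed=0, num_threads=4)
    # frames is a float64 array of shape (pfx.shape[0], 512, 512) == (10, 512, 512).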
+761: def make_whitefield(data: np.ndarray, mask: np.ndarray, axis: cython.int=0,
/* Python wrapper */
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_23make_whitefield(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_6pyrost_3bin_10simulation_22make_whitefield[] = "make_whitefield(ndarray data: np.ndarray, ndarray mask: np.ndarray, int axis: cython.int = 0, unsigned int num_threads: cython.uint = 1) -> np.ndarray\nGenerate a whitefield using median filtering along `axis`.\n\n    Parameters\n    ----------\n    data : numpy.ndarray\n        Intensity frames.\n    mask : numpy.ndarray\n        Bad pixel mask.\n    axis : int, optional\n        Array axis along which median values are calculated.\n    num_threads : int, optional\n        Number of threads.\n\n    Returns\n    -------\n    wfield : numpy.ndarray\n        Whitefield.\n    ";
static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_23make_whitefield = {"make_whitefield", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_23make_whitefield, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_22make_whitefield};
static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_23make_whitefield(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_data = 0;
  PyArrayObject *__pyx_v_mask = 0;
  int __pyx_v_axis;
  unsigned int __pyx_v_num_threads;
  PyArrayObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("make_whitefield (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_data,&__pyx_n_s_mask,&__pyx_n_s_axis,&__pyx_n_s_num_threads,0};
    PyObject* values[4] = {0,0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mask)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("make_whitefield", 0, 2, 4, 1); __PYX_ERR(0, 761, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis);
          if (value) { values[2] = value; kw_args--; }
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads);
          if (value) { values[3] = value; kw_args--; }
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "make_whitefield") < 0)) __PYX_ERR(0, 761, __pyx_L3_error)
      }
    } else {
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_data = ((PyArrayObject *)values[0]);
    __pyx_v_mask = ((PyArrayObject *)values[1]);
    if (values[2]) {
      __pyx_v_axis = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 761, __pyx_L3_error)
    } else {
      __pyx_v_axis = ((int)((int)0));
    }
    if (values[3]) {
      __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[3]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 762, __pyx_L3_error)
    } else {
      __pyx_v_num_threads = ((unsigned int)((unsigned int)1));
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("make_whitefield", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 761, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("pyrost.bin.simulation.make_whitefield", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_data), __pyx_ptype_5numpy_ndarray, 1, "data", 0))) __PYX_ERR(0, 761, __pyx_L1_error)
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mask), __pyx_ptype_5numpy_ndarray, 1, "mask", 0))) __PYX_ERR(0, 761, __pyx_L1_error)
  __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_22make_whitefield(__pyx_self, __pyx_v_data, __pyx_v_mask, __pyx_v_axis, __pyx_v_num_threads);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_22make_whitefield(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_data, PyArrayObject *__pyx_v_mask, int __pyx_v_axis, unsigned int __pyx_v_num_threads) {
  int __pyx_v_ndim;
  npy_intp __pyx_v_isize;
  npy_intp *__pyx_v_dims;
  int __pyx_v_i;
  npy_intp __pyx_v_npts;
  npy_intp __pyx_v_istride;
  int __pyx_v_type_num;
  PyArrayObject *__pyx_v_out = 0;
  void *__pyx_v__out;
  void *__pyx_v__data;
  unsigned char *__pyx_v__mask;
  PyArrayObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("make_whitefield", 0);
  __Pyx_INCREF((PyObject *)__pyx_v_data);
  __Pyx_INCREF((PyObject *)__pyx_v_mask);
/* … */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("pyrost.bin.simulation.make_whitefield", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_out);
  __Pyx_XDECREF((PyObject *)__pyx_v_data);
  __Pyx_XDECREF((PyObject *)__pyx_v_mask);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__33 = PyTuple_Pack(15, __pyx_n_s_data, __pyx_n_s_mask, __pyx_n_s_axis, __pyx_n_s_num_threads, __pyx_n_s_ndim, __pyx_n_s_isize, __pyx_n_s_dims, __pyx_n_s_i, __pyx_n_s_npts, __pyx_n_s_istride, __pyx_n_s_type_num, __pyx_n_s_out, __pyx_n_s_out_2, __pyx_n_s_data_2, __pyx_n_s_mask_2); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(0, 761, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__33);
  __Pyx_GIVEREF(__pyx_tuple__33);
/* … */
  __pyx_t_2 = __Pyx_PyInt_From_int(((int)0)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 761, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
/* … */
  __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 761, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_4);
  __pyx_t_2 = 0;
  __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyDict_NewPresized(5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 761, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_data, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 761, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_mask, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 761, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_axis, __pyx_n_u_int) < 0) __PYX_ERR(0, 761, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 761, __pyx_L1_error)
  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 761, __pyx_L1_error)
  __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_23make_whitefield, 0, __pyx_n_s_make_whitefield, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__34)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 761, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_t_1);
  __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_2, __pyx_t_4);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_make_whitefield, __pyx_t_2) < 0) __PYX_ERR(0, 761, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+762:                     num_threads: cython.uint=1) -> np.ndarray:
  __pyx_t_4 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 762, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
 763:     """Generate a whitefield using the median filtering along the `axis`.
 764: 
 765:     Parameters
 766:     ----------
 767:     data : numpy.ndarray
 768:         Intensity frames.
 769:     mask : numpy.ndarray
 770:         Bad pixel mask.
 771:     axis : int, optional
 772:         Array axis along which median values are calculated.
 773:     num_threads : int, optional
 774:         Number of threads.
 775: 
 776:     Returns
 777:     -------
 778:     wfield : numpy.ndarray
 779:         Whitefield.
 780:     """
+781:     data = np.PyArray_GETCONTIGUOUS(data)
  __pyx_t_1 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_data)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 781, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF_SET(__pyx_v_data, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
+782:     mask = np.PyArray_GETCONTIGUOUS(mask)
  __pyx_t_1 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_mask)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 782, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF_SET(__pyx_v_mask, ((PyArrayObject *)__pyx_t_1));
  __pyx_t_1 = 0;
 783: 
+784:     if not np.PyArray_ISBOOL(mask):
  __pyx_t_2 = ((!(PyArray_ISBOOL(__pyx_v_mask) != 0)) != 0);
  if (unlikely(__pyx_t_2)) {
/* … */
  }
+785:         raise TypeError('mask array must be of boolean type')
    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 785, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 785, __pyx_L1_error)
/* … */
  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_mask_array_must_be_of_boolean_ty); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 785, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__6);
  __Pyx_GIVEREF(__pyx_tuple__6);
+786:     cdef int ndim = data.ndim
  __pyx_t_3 = __pyx_v_data->nd;
  __pyx_v_ndim = __pyx_t_3;
+787:     if memcmp(data.shape, mask.shape, ndim * sizeof(np.npy_intp)):
  __pyx_t_2 = (memcmp(__pyx_v_data->dimensions, __pyx_v_mask->dimensions, (__pyx_v_ndim * (sizeof(npy_intp)))) != 0);
  if (unlikely(__pyx_t_2)) {
/* … */
  }
+788:         raise ValueError('mask and data arrays must have identical shapes')
    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 788, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __PYX_ERR(0, 788, __pyx_L1_error)
/* … */
  __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_mask_and_data_arrays_must_have_i); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(0, 788, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__7);
  __Pyx_GIVEREF(__pyx_tuple__7);
+789:     axis = axis if axis >= 0 else ndim + axis
  if (((__pyx_v_axis >= 0) != 0)) {
    __pyx_t_3 = __pyx_v_axis;
  } else {
    __pyx_t_3 = (__pyx_v_ndim + __pyx_v_axis);
  }
  __pyx_v_axis = __pyx_t_3;
+790:     axis = axis if axis <= ndim - 1 else ndim - 1
  if (((__pyx_v_axis <= (__pyx_v_ndim - 1)) != 0)) {
    __pyx_t_4 = __pyx_v_axis;
  } else {
    __pyx_t_4 = (__pyx_v_ndim - 1);
  }
  __pyx_v_axis = __pyx_t_4;
+791:     cdef np.npy_intp isize = np.PyArray_SIZE(data)
  __pyx_v_isize = PyArray_SIZE(__pyx_v_data);
+792:     cdef np.npy_intp *dims = <np.npy_intp *>malloc((ndim - 1) * sizeof(np.npy_intp))
  __pyx_v_dims = ((npy_intp *)malloc(((__pyx_v_ndim - 1) * (sizeof(npy_intp)))));
+793:     if dims is NULL:
  __pyx_t_2 = ((__pyx_v_dims == NULL) != 0);
  if (__pyx_t_2) {
/* … */
  }
+794:         abort()
    abort();
 795:     cdef int i
+796:     for i in range(axis):
  __pyx_t_3 = __pyx_v_axis;
  __pyx_t_5 = __pyx_t_3;
  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;
+797:         dims[i] = data.shape[i]
    (__pyx_v_dims[__pyx_v_i]) = (__pyx_v_data->dimensions[__pyx_v_i]);
  }
+798:     cdef np.npy_intp npts = data.shape[axis]
  __pyx_v_npts = (__pyx_v_data->dimensions[__pyx_v_axis]);
+799:     for i in range(axis + 1, ndim):
  __pyx_t_3 = __pyx_v_ndim;
  __pyx_t_5 = __pyx_t_3;
  for (__pyx_t_6 = (__pyx_v_axis + 1); __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;
+800:         dims[i - 1] = data.shape[i]
    (__pyx_v_dims[(__pyx_v_i - 1)]) = (__pyx_v_data->dimensions[__pyx_v_i]);
  }
+801:     cdef np.npy_intp istride = np.PyArray_STRIDE(data, axis) / np.PyArray_ITEMSIZE(data)
  __pyx_v_istride = (PyArray_STRIDE(__pyx_v_data, __pyx_v_axis) / PyArray_ITEMSIZE(__pyx_v_data));
+802:     cdef int type_num = np.PyArray_TYPE(data)
  __pyx_v_type_num = PyArray_TYPE(__pyx_v_data);
+803:     cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim - 1, dims, type_num)
  __pyx_t_1 = PyArray_SimpleNew((__pyx_v_ndim - 1), __pyx_v_dims, __pyx_v_type_num); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_7 = __pyx_t_1;
  __Pyx_INCREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_v_out = ((PyArrayObject *)__pyx_t_7);
  __pyx_t_7 = 0;
+804:     cdef void *_out = <void *>np.PyArray_DATA(out)
  __pyx_v__out = ((void *)PyArray_DATA(__pyx_v_out));
+805:     cdef void *_data = <void *>np.PyArray_DATA(data)
  __pyx_v__data = ((void *)PyArray_DATA(__pyx_v_data));
+806:     cdef unsigned char *_mask = <unsigned char *>np.PyArray_DATA(mask)
  __pyx_v__mask = ((unsigned char *)PyArray_DATA(__pyx_v_mask));
+807:     with nogil:
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {
/* … */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L12;
        }
        __pyx_L11_error: {
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L1_error;
        }
        __pyx_L12:;
      }
  }
+808:         if type_num == np.NPY_FLOAT64:
        switch (__pyx_v_type_num) {
          case NPY_FLOAT64:
/* … */
          break;
          case NPY_FLOAT32:
+809:                 whitefield(_out, _data, _mask, isize, npts, istride, 8, compare_double, num_threads)
          whitefield(__pyx_v__out, __pyx_v__data, __pyx_v__mask, __pyx_v_isize, __pyx_v_npts, __pyx_v_istride, 8, compare_double, __pyx_v_num_threads);
+810:         elif type_num == np.NPY_FLOAT32:
          break;
          case NPY_INT32:
+811:                 whitefield(_out, _data, _mask, isize, npts, istride, 4, compare_float, num_threads)
          whitefield(__pyx_v__out, __pyx_v__data, __pyx_v__mask, __pyx_v_isize, __pyx_v_npts, __pyx_v_istride, 4, compare_float, __pyx_v_num_threads);
+812:         elif type_num == np.NPY_INT32:
          break;
          default:
+813:                 whitefield(_out, _data, _mask, isize, npts, istride, 4, compare_long, num_threads)
          whitefield(__pyx_v__out, __pyx_v__data, __pyx_v__mask, __pyx_v_isize, __pyx_v_npts, __pyx_v_istride, 4, compare_long, __pyx_v_num_threads);
 814:         else:
+815:             raise TypeError('data argument has incompatible type: {:s}'.format(data.dtype))
          {
              #ifdef WITH_THREAD
              PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
              #endif
              /*try:*/ {
                __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_data_argument_has_incompatible_t, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 815, __pyx_L14_error)
                __Pyx_GOTREF(__pyx_t_1);
                __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_data), __pyx_n_s_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 815, __pyx_L14_error)
                __Pyx_GOTREF(__pyx_t_8);
                __pyx_t_9 = NULL;
                if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
                  __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_1);
                  if (likely(__pyx_t_9)) {
                    PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
                    __Pyx_INCREF(__pyx_t_9);
                    __Pyx_INCREF(function);
                    __Pyx_DECREF_SET(__pyx_t_1, function);
                  }
                }
                __pyx_t_7 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_9, __pyx_t_8) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_8);
                __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
                __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
                if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 815, __pyx_L14_error)
                __Pyx_GOTREF(__pyx_t_7);
                __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 815, __pyx_L14_error)
                __Pyx_GOTREF(__pyx_t_1);
                __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
                __Pyx_Raise(__pyx_t_1, 0, 0, 0);
                __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                __PYX_ERR(0, 815, __pyx_L14_error)
              }
              /*finally:*/ {
                __pyx_L14_error: {
                  #ifdef WITH_THREAD
                  __Pyx_PyGILState_Release(__pyx_gilstate_save);
                  #endif
                  goto __pyx_L11_error;
                }
              }
          }
          break;
        }
      }
+816:     free(dims)
  free(__pyx_v_dims);
+817:     return out
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_out));
  __pyx_r = __pyx_v_out;
  goto __pyx_L0;
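A minimal usage sketch for make_whitefield under the same assumptions (illustrative sizes only): the dispatch above accepts float64, float32 and int32 data, and the checks require `mask` to be a boolean array with the same shape as `data`.

    import numpy as np
    from pyrost.bin.simulation import make_whitefield

    data = np.random.poisson(100.0, size=(10, 512, 512)).astype(np.float64)
    mask = np.ones(data.shape, dtype=bool)   # bad-pixel mask, boolean, same shape as data

    # The median is taken over the frame axis (axis=0), leaving one whitefield frame.
    wfield = make_whitefield(data, mask, axis=0, num_threads=4)
    # wfield has shape (512, 512) and the same dtype as data.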