diff --git a/ext/include/arithmetic.c b/ext/include/arithmetic.c index d1617d2..39792dc 100644 --- a/ext/include/arithmetic.c +++ b/ext/include/arithmetic.c @@ -14,15 +14,12 @@ void tensor_multiply(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); array_init_size(&c, n); for (i = 0; i < n; ++i) { - product = zephir_get_doubleval(&ba[i].val) * zephir_get_doubleval(&bb[i].val); + product = zephir_get_doubleval(zend_hash_index_find(aa, i)) * zephir_get_doubleval(zend_hash_index_find(ab, i)); add_next_index_double(&c, product); } @@ -39,15 +36,12 @@ void tensor_divide(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); array_init_size(&c, n); for (i = 0; i < n; ++i) { - quotient = zephir_get_doubleval(&ba[i].val) / zephir_get_doubleval(&bb[i].val); + quotient = zephir_get_doubleval(zend_hash_index_find(aa, i)) / zephir_get_doubleval(zend_hash_index_find(ab, i)); add_next_index_double(&c, quotient); } @@ -64,15 +58,12 @@ void tensor_add(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); array_init_size(&c, n); for (i = 0; i < n; ++i) { - sum = zephir_get_doubleval(&ba[i].val) + zephir_get_doubleval(&bb[i].val); + sum = zephir_get_doubleval(zend_hash_index_find(aa, i)) + zephir_get_doubleval(zend_hash_index_find(ab, i)); add_next_index_double(&c, sum); } @@ -89,15 +80,12 @@ void tensor_subtract(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); array_init_size(&c, n); for (i = 0; i < n; ++i) { 
- difference = zephir_get_doubleval(&ba[i].val) - zephir_get_doubleval(&bb[i].val); + difference = zephir_get_doubleval(zend_hash_index_find(aa, i)) - zephir_get_doubleval(zend_hash_index_find(ab, i)); add_next_index_double(&c, difference); } @@ -114,15 +102,12 @@ void tensor_pow(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); array_init_size(&c, n); for (i = 0; i < n; ++i) { - power = pow(zephir_get_doubleval(&ba[i].val), zephir_get_doubleval(&bb[i].val)); + power = pow(zephir_get_doubleval(zend_hash_index_find(aa, i)), zephir_get_doubleval(zend_hash_index_find(ab, i))); add_next_index_double(&c, power); } @@ -139,15 +124,12 @@ void tensor_mod(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); array_init_size(&c, n); for (i = 0; i < n; ++i) { - mod_function(&modulus, &ba[i].val, &bb[i].val); + mod_function(&modulus, zend_hash_index_find(aa, i), zend_hash_index_find(ab, i)); add_next_index_zval(&c, &modulus); } @@ -163,8 +145,6 @@ void tensor_multiply_scalar(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - double ab = zephir_get_doubleval(b); unsigned int n = zend_array_count(aa); @@ -172,7 +152,7 @@ void tensor_multiply_scalar(zval * return_value, zval * a, zval * b) array_init_size(&c, n); for (i = 0; i < n; ++i) { - product = zephir_get_doubleval(&ba[i].val) * ab; + product = zephir_get_doubleval(zend_hash_index_find(aa, i)) * ab; add_next_index_double(&c, product); } @@ -188,8 +168,6 @@ void tensor_divide_scalar(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - double ab = zephir_get_doubleval(b); unsigned int n = zend_array_count(aa); @@ -197,7 +175,7 @@ void 
tensor_divide_scalar(zval * return_value, zval * a, zval * b) array_init_size(&c, n); for (i = 0; i < n; ++i) { - quotient = zephir_get_doubleval(&ba[i].val) / ab; + quotient = zephir_get_doubleval(zend_hash_index_find(aa, i)) / ab; add_next_index_double(&c, quotient); } @@ -213,8 +191,6 @@ void tensor_add_scalar(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - double ab = zephir_get_doubleval(b); unsigned int n = zend_array_count(aa); @@ -222,7 +198,7 @@ void tensor_add_scalar(zval * return_value, zval * a, zval * b) array_init_size(&c, n); for (i = 0; i < n; ++i) { - sum = zephir_get_doubleval(&ba[i].val) + ab; + sum = zephir_get_doubleval(zend_hash_index_find(aa, i)) + ab; add_next_index_double(&c, sum); } @@ -238,8 +214,6 @@ void tensor_subtract_scalar(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - double ab = zephir_get_doubleval(b); unsigned int n = zend_array_count(aa); @@ -247,7 +221,7 @@ void tensor_subtract_scalar(zval * return_value, zval * a, zval * b) array_init_size(&c, n); for (i = 0; i < n; ++i) { - difference = zephir_get_doubleval(&ba[i].val) - ab; + difference = zephir_get_doubleval(zend_hash_index_find(aa, i)) - ab; add_next_index_double(&c, difference); } @@ -263,8 +237,6 @@ void tensor_pow_scalar(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - double ab = zephir_get_doubleval(b); unsigned int n = zend_array_count(aa); @@ -272,7 +244,7 @@ void tensor_pow_scalar(zval * return_value, zval * a, zval * b) array_init_size(&c, n); for (i = 0; i < n; ++i) { - power = pow(zephir_get_doubleval(&ba[i].val), ab); + power = pow(zephir_get_doubleval(zend_hash_index_find(aa, i)), ab); add_next_index_double(&c, power); } @@ -288,14 +260,12 @@ void tensor_mod_scalar(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - unsigned int n = zend_array_count(aa); 
array_init_size(&c, n); for (i = 0; i < n; ++i) { - mod_function(&modulus, &ba[i].val, b); + mod_function(&modulus, zend_hash_index_find(aa, i), b); add_next_index_zval(&c, &modulus); } diff --git a/ext/include/comparison.c b/ext/include/comparison.c index c823ea7..652c7b6 100644 --- a/ext/include/comparison.c +++ b/ext/include/comparison.c @@ -13,15 +13,12 @@ void tensor_equal(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); array_init_size(&c, n); for (i = 0; i < n; ++i) { - if (zephir_get_doubleval(&ba[i].val) == zephir_get_doubleval(&bb[i].val)) { + if (zephir_get_doubleval(zend_hash_index_find(aa, i)) == zephir_get_doubleval(zend_hash_index_find(ab, i))) { add_next_index_long(&c, 1); } else { add_next_index_long(&c, 0); @@ -39,15 +36,12 @@ void tensor_not_equal(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); array_init_size(&c, n); for (i = 0; i < n; ++i) { - if (zephir_get_doubleval(&ba[i].val) != zephir_get_doubleval(&bb[i].val)) { + if (zephir_get_doubleval(zend_hash_index_find(aa, i)) != zephir_get_doubleval(zend_hash_index_find(ab, i))) { add_next_index_long(&c, 1); } else { add_next_index_long(&c, 0); @@ -65,15 +59,12 @@ void tensor_greater(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); array_init_size(&c, n); for (i = 0; i < n; ++i) { - if (zephir_get_doubleval(&ba[i].val) > zephir_get_doubleval(&bb[i].val)) { + if (zephir_get_doubleval(zend_hash_index_find(aa, i)) > zephir_get_doubleval(zend_hash_index_find(ab, i))) { add_next_index_long(&c, 1); } else { add_next_index_long(&c, 0); @@ -91,15 +82,12 @@ void 
tensor_greater_equal(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); array_init_size(&c, n); for (i = 0; i < n; ++i) { - if (zephir_get_doubleval(&ba[i].val) >= zephir_get_doubleval(&bb[i].val)) { + if (zephir_get_doubleval(zend_hash_index_find(aa, i)) >= zephir_get_doubleval(zend_hash_index_find(ab, i))) { add_next_index_long(&c, 1); } else { add_next_index_long(&c, 0); @@ -117,15 +105,12 @@ void tensor_less(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); array_init_size(&c, n); for (i = 0; i < n; ++i) { - if (zephir_get_doubleval(&ba[i].val) < zephir_get_doubleval(&bb[i].val)) { + if (zephir_get_doubleval(zend_hash_index_find(aa, i)) < zephir_get_doubleval(zend_hash_index_find(ab, i))) { add_next_index_long(&c, 1); } else { add_next_index_long(&c, 0); @@ -143,15 +128,12 @@ void tensor_less_equal(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); array_init_size(&c, n); for (i = 0; i < n; ++i) { - if (zephir_get_doubleval(&ba[i].val) <= zephir_get_doubleval(&bb[i].val)) { + if (zephir_get_doubleval(zend_hash_index_find(aa, i)) <= zephir_get_doubleval(zend_hash_index_find(ab, i))) { add_next_index_long(&c, 1); } else { add_next_index_long(&c, 0); @@ -168,8 +150,6 @@ void tensor_equal_scalar(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - double ab = zephir_get_doubleval(b); unsigned int n = zend_array_count(aa); @@ -177,7 +157,7 @@ void tensor_equal_scalar(zval * return_value, zval * a, zval * b) array_init_size(&c, n); for (i = 0; i < n; ++i) { - if 
(zephir_get_doubleval(&ba[i].val) == ab) { + if (zephir_get_doubleval(zend_hash_index_find(aa, i)) == ab) { add_next_index_long(&c, 1); } else { add_next_index_long(&c, 0); @@ -194,8 +174,6 @@ void tensor_not_equal_scalar(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - double ab = zephir_get_doubleval(b); unsigned int n = zend_array_count(aa); @@ -203,7 +181,7 @@ void tensor_not_equal_scalar(zval * return_value, zval * a, zval * b) array_init_size(&c, n); for (i = 0; i < n; ++i) { - if (zephir_get_doubleval(&ba[i].val) != ab) { + if (zephir_get_doubleval(zend_hash_index_find(aa, i)) != ab) { add_next_index_long(&c, 1); } else { add_next_index_long(&c, 0); @@ -220,8 +198,6 @@ void tensor_greater_scalar(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - double ab = zephir_get_doubleval(b); unsigned int n = zend_array_count(aa); @@ -229,7 +205,7 @@ void tensor_greater_scalar(zval * return_value, zval * a, zval * b) array_init_size(&c, n); for (i = 0; i < n; ++i) { - if (zephir_get_doubleval(&ba[i].val) > ab) { + if (zephir_get_doubleval(zend_hash_index_find(aa, i)) > ab) { add_next_index_long(&c, 1); } else { add_next_index_long(&c, 0); @@ -246,8 +222,6 @@ void tensor_greater_equal_scalar(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - double ab = zephir_get_doubleval(b); unsigned int n = zend_array_count(aa); @@ -255,7 +229,7 @@ void tensor_greater_equal_scalar(zval * return_value, zval * a, zval * b) array_init_size(&c, n); for (i = 0; i < n; ++i) { - if (zephir_get_doubleval(&ba[i].val) >= ab) { + if (zephir_get_doubleval(zend_hash_index_find(aa, i)) >= ab) { add_next_index_long(&c, 1); } else { add_next_index_long(&c, 0); @@ -272,8 +246,6 @@ void tensor_less_scalar(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - double ab = zephir_get_doubleval(b); unsigned 
int n = zend_array_count(aa); @@ -281,7 +253,7 @@ void tensor_less_scalar(zval * return_value, zval * a, zval * b) array_init_size(&c, n); for (i = 0; i < n; ++i) { - if (zephir_get_doubleval(&ba[i].val) < ab) { + if (zephir_get_doubleval(zend_hash_index_find(aa, i)) < ab) { add_next_index_long(&c, 1); } else { add_next_index_long(&c, 0); @@ -307,7 +279,7 @@ void tensor_less_equal_scalar(zval * return_value, zval * a, zval * b) array_init_size(&c, n); for (i = 0; i < n; ++i) { - if (zephir_get_doubleval(&ba[i].val) <= ab) { + if (zephir_get_doubleval(zend_hash_index_find(aa, i)) <= ab) { add_next_index_long(&c, 1); } else { add_next_index_long(&c, 0); diff --git a/ext/include/linear_algebra.c b/ext/include/linear_algebra.c index 5a1403e..6eecf8d 100644 --- a/ext/include/linear_algebra.c +++ b/ext/include/linear_algebra.c @@ -17,36 +17,33 @@ void tensor_matmul(zval * return_value, zval * a, zval * b) { unsigned int i, j; - Bucket * row; + zval * row; zval rowC, c; zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int m = zend_array_count(aa); unsigned int p = zend_array_count(ab); - unsigned int n = zend_array_count(Z_ARR(bb[0].val)); + unsigned int n = zend_array_count(Z_ARR_P(zend_hash_index_find(ab, 0))); double * va = emalloc(m * p * sizeof(double)); double * vb = emalloc(n * p * sizeof(double)); double * vc = emalloc(m * n * sizeof(double)); for (i = 0; i < m; ++i) { - row = Z_ARR(ba[i].val)->arData; + row = zend_hash_index_find(aa, i); for (j = 0; j < p; ++j) { - va[i * p + j] = zephir_get_doubleval(&row[j].val); + va[i * p + j] = zephir_get_doubleval(zend_hash_index_find(Z_ARR_P(row), j)); } } for (i = 0; i < p; ++i) { - row = Z_ARR(bb[i].val)->arData; + row = zend_hash_index_find(ab, i); for (j = 0; j < n; ++j) { - vb[i * n + j] = zephir_get_doubleval(&row[j].val); + vb[i * n + j] = zephir_get_doubleval(zend_hash_index_find(Z_ARR_P(row), j)); } } @@ -85,15 +82,12 @@ void 
tensor_dot(zval * return_value, zval * a, zval * b) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int n = zend_array_count(aa); double sigma = 0.0; for (i = 0; i < n; ++i) { - sigma += zephir_get_doubleval(&ba[i].val) * zephir_get_doubleval(&bb[i].val); + sigma += zephir_get_doubleval(zend_hash_index_find(aa, i)) * zephir_get_doubleval(zend_hash_index_find(ab, i)); } RETVAL_DOUBLE(sigma); @@ -101,33 +95,31 @@ void tensor_dot(zval * return_value, zval * a, zval * b) /** * Return the multiplicative inverse of a square matrix A. - * + * * @param return_value * @param a */ void tensor_inverse(zval * return_value, zval * a) { unsigned int i, j; - Bucket * row; + zval * row; zval rowB, b; zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - unsigned int n = zend_array_count(aa); double * va = emalloc(n * n * sizeof(double)); int * pivots = emalloc(n * sizeof(int)); for (i = 0; i < n; ++i) { - row = Z_ARR(ba[i].val)->arData; + row = zend_hash_index_find(aa, i); for (j = 0; j < n; ++j) { - va[i * n + j] = zephir_get_doubleval(&row[j].val); + va[i * n + j] = zephir_get_doubleval(zend_hash_index_find(Z_ARR_P(row), j)); } } - + lapack_int status; status = LAPACKE_dgetrf(LAPACK_ROW_MAJOR, n, n, va, n, pivots); @@ -169,15 +161,13 @@ void tensor_inverse(zval * return_value, zval * a) void tensor_pseudoinverse(zval * return_value, zval * a) { unsigned int i, j; - Bucket * row; + zval * row; zval b, rowB; zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - unsigned int m = zend_array_count(aa); - unsigned int n = zend_array_count(Z_ARR(ba[0].val)); + unsigned int n = zend_array_count(Z_ARR_P(zend_hash_index_find(aa, 0))); unsigned int k = MIN(m, n); double * va = emalloc(m * n * sizeof(double)); @@ -187,10 +177,10 @@ void tensor_pseudoinverse(zval * return_value, zval * a) double * vb = emalloc(n * m * sizeof(double)); for (i = 0; i < m; ++i) { - row = Z_ARR(ba[i].val)->arData; + row = 
zend_hash_index_find(aa, i); for (j = 0; j < n; ++j) { - va[i * n + j] = zephir_get_doubleval(&row[j].val); + va[i * n + j] = zephir_get_doubleval(zend_hash_index_find(Z_ARR_P(row), j)); } } @@ -236,25 +226,23 @@ void tensor_pseudoinverse(zval * return_value, zval * a) void tensor_ref(zval * return_value, zval * a) { unsigned int i, j; - Bucket * row; + zval * row; zval rowB, b; zval tuple; zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - unsigned int m = zend_array_count(aa); - unsigned int n = zend_array_count(Z_ARR(ba[0].val)); + unsigned int n = zend_array_count(Z_ARR_P(zend_hash_index_find(aa, 0))); double * va = emalloc(m * n * sizeof(double)); int * pivots = emalloc(MIN(m, n) * sizeof(int)); for (i = 0; i < m; ++i) { - row = Z_ARR(ba[i].val)->arData; + row = zend_hash_index_find(aa, i); for (j = 0; j < n; ++j) { - va[i * n + j] = zephir_get_doubleval(&row[j].val); + va[i * n + j] = zephir_get_doubleval(zend_hash_index_find(Z_ARR_P(row), j)); } } @@ -306,22 +294,20 @@ void tensor_ref(zval * return_value, zval * a) void tensor_cholesky(zval * return_value, zval * a) { unsigned int i, j; - Bucket * row; + zval * row; zval rowB, b; zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - unsigned int n = zend_array_count(aa); double * va = emalloc(n * n * sizeof(double)); for (i = 0; i < n; ++i) { - row = Z_ARR(ba[i].val)->arData; + row = zend_hash_index_find(aa, i); for (j = 0; j < n; ++j) { - va[i * n + j] = zephir_get_doubleval(&row[j].val); + va[i * n + j] = zephir_get_doubleval(zend_hash_index_find(Z_ARR_P(row), j)); } } @@ -361,24 +347,22 @@ void tensor_cholesky(zval * return_value, zval * a) void tensor_lu(zval * return_value, zval * a) { unsigned int i, j; - Bucket * row; + zval * row; zval rowL, l, rowU, u, rowP, p; zval tuple; zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - unsigned int n = zend_array_count(aa); double * va = emalloc(n * n * sizeof(double)); int * pivots = emalloc(n * sizeof(int)); for (i = 0; i < n; ++i) { - 
row = Z_ARR(ba[i].val)->arData; + row = zend_hash_index_find(aa, i); for (j = 0; j < n; ++j) { - va[i * n + j] = zephir_get_doubleval(&row[j].val); + va[i * n + j] = zephir_get_doubleval(zend_hash_index_find(Z_ARR_P(row), j)); } } @@ -457,7 +441,7 @@ void tensor_lu(zval * return_value, zval * a) void tensor_eig(zval * return_value, zval * a) { unsigned int i, j; - Bucket * row; + zval * row; zval eigenvalues; zval eigenvectors; zval eigenvector; @@ -465,8 +449,6 @@ void tensor_eig(zval * return_value, zval * a) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - unsigned int n = zend_array_count(aa); double * va = emalloc(n * n * sizeof(double)); @@ -475,10 +457,10 @@ void tensor_eig(zval * return_value, zval * a) double * vr = emalloc(n * n * sizeof(double)); for (i = 0; i < n; ++i) { - row = Z_ARR(ba[i].val)->arData; + row = zend_hash_index_find(aa, i); for (j = 0; j < n; ++j) { - va[i * n + j] = zephir_get_doubleval(&row[j].val); + va[i * n + j] = zephir_get_doubleval(zend_hash_index_find(Z_ARR_P(row), j)); } } @@ -525,7 +507,7 @@ void tensor_eig(zval * return_value, zval * a) void tensor_eig_symmetric(zval * return_value, zval * a) { unsigned int i, j; - Bucket * row; + zval * row; zval eigenvalues; zval eigenvectors; zval eigenvector; @@ -533,18 +515,16 @@ void tensor_eig_symmetric(zval * return_value, zval * a) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - unsigned int n = zend_array_count(aa); double * va = emalloc(n * n * sizeof(double)); double * wr = emalloc(n * sizeof(double)); for (i = 0; i < n; ++i) { - row = Z_ARR(ba[i].val)->arData; + row = zend_hash_index_find(aa, i); for (j = 0; j < n; ++j) { - va[i * n + j] = zephir_get_doubleval(&row[j].val); + va[i * n + j] = zephir_get_doubleval(zend_hash_index_find(Z_ARR_P(row), j)); } } @@ -589,7 +569,7 @@ void tensor_eig_symmetric(zval * return_value, zval * a) void tensor_svd(zval * return_value, zval * a) { unsigned int i, j; - Bucket * row; + zval * row; zval u, rowU; zval s; zval 
vt, rowVt; @@ -597,10 +577,8 @@ void tensor_svd(zval * return_value, zval * a) zend_array * aa = Z_ARR_P(a); - Bucket * ba = aa->arData; - unsigned int m = zend_array_count(aa); - unsigned int n = zend_array_count(Z_ARR(ba[0].val)); + unsigned int n = zend_array_count(Z_ARR_P(zend_hash_index_find(aa, 0))); unsigned int k = MIN(m, n); double * va = emalloc(m * n * sizeof(double)); @@ -609,10 +587,10 @@ void tensor_svd(zval * return_value, zval * a) double * vvt = emalloc(n * n * sizeof(double)); for (i = 0; i < m; ++i) { - row = Z_ARR(ba[i].val)->arData; + row = zend_hash_index_find(aa, i); for (j = 0; j < n; ++j) { - va[i * n + j] = zephir_get_doubleval(&row[j].val); + va[i * n + j] = zephir_get_doubleval(zend_hash_index_find(Z_ARR_P(row), j)); } } diff --git a/ext/include/signal_processing.c b/ext/include/signal_processing.c index 4061ca0..07040b1 100644 --- a/ext/include/signal_processing.c +++ b/ext/include/signal_processing.c @@ -23,9 +23,6 @@ void tensor_convolve_1d(zval * return_value, zval * a, zval * b, zval * stride) zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - unsigned int s = zephir_get_intval(stride); unsigned int na = zend_array_count(aa); @@ -36,11 +33,11 @@ void tensor_convolve_1d(zval * return_value, zval * a, zval * b, zval * stride) double * vb = emalloc(nb * sizeof(double)); for (i = 0; i < na; ++i) { - va[i] = zephir_get_doubleval(&ba[i].val); + va[i] = zephir_get_doubleval(zend_hash_index_find(aa, i)); } for (i = 0; i < nb; ++i) { - vb[i] = zephir_get_doubleval(&bb[i].val); + vb[i] = zephir_get_doubleval(zend_hash_index_find(ab, i)); } array_init_size(&c, nc / s); @@ -77,38 +74,35 @@ void tensor_convolve_2d(zval * return_value, zval * a, zval * b, zval * stride) unsigned int i, j, k, l; int x, y; double sigma; - Bucket * row; + zval * row; zval rowC, c; zend_array * aa = Z_ARR_P(a); zend_array * ab = Z_ARR_P(b); - Bucket * ba = aa->arData; - Bucket * bb = ab->arData; - 
unsigned int s = zephir_get_intval(stride); unsigned int ma = zend_array_count(aa); - unsigned int na = zend_array_count(Z_ARR(ba[0].val)); + unsigned int na = zend_array_count(Z_ARR_P(zend_hash_index_find(aa, 0))); unsigned int mb = zend_array_count(ab); - unsigned int nb = zend_array_count(Z_ARR(bb[0].val)); + unsigned int nb = zend_array_count(Z_ARR_P(zend_hash_index_find(ab, 0))); double * va = emalloc(ma * na * sizeof(double)); double * vb = emalloc(mb * nb * sizeof(double)); for (i = 0; i < ma; ++i) { - row = Z_ARR(ba[i].val)->arData; + row = zend_hash_index_find(aa, i); for (j = 0; j < na; ++j) { - va[i * na + j] = zephir_get_doubleval(&row[j].val); + va[i * na + j] = zephir_get_doubleval(zend_hash_index_find(Z_ARR_P(row), j)); } } for (i = 0; i < mb; ++i) { - row = Z_ARR(bb[i].val)->arData; + row = zend_hash_index_find(ab, i); for (j = 0; j < nb; ++j) { - vb[i * nb + j] = zephir_get_doubleval(&row[j].val); + vb[i * nb + j] = zephir_get_doubleval(zend_hash_index_find(Z_ARR_P(row), j)); } } diff --git a/ext/tensor/matrix.zep.c b/ext/tensor/matrix.zep.c index 742573d..bba37b5 100644 --- a/ext/tensor/matrix.zep.c +++ b/ext/tensor/matrix.zep.c @@ -11329,7 +11329,7 @@ PHP_METHOD(Tensor_Matrix, offsetUnset) * * @param mixed index * @throws \Tensor\Exceptions\InvalidArgumentException - * @return array + * @return \Tensor\Vector */ PHP_METHOD(Tensor_Matrix, offsetGet) { diff --git a/ext/tensor/matrix.zep.h b/ext/tensor/matrix.zep.h index 118b153..373019c 100644 --- a/ext/tensor/matrix.zep.h +++ b/ext/tensor/matrix.zep.h @@ -727,7 +727,7 @@ ZEND_BEGIN_ARG_WITH_RETURN_TYPE_INFO_EX(arginfo_tensor_matrix_offsetunset, 0, 1, ZEND_ARG_INFO(0, index) ZEND_END_ARG_INFO() -ZEND_BEGIN_ARG_WITH_RETURN_TYPE_INFO_EX(arginfo_tensor_matrix_offsetget, 0, 1, IS_ARRAY, 0) +ZEND_BEGIN_ARG_WITH_RETURN_OBJ_INFO_EX(arginfo_tensor_matrix_offsetget, 0, 1, Tensor\\Vector, 0) ZEND_ARG_INFO(0, index) ZEND_END_ARG_INFO() diff --git a/ext/tensor/vector.zep.c b/ext/tensor/vector.zep.c index 
1e72c1b..18412f2 100644 --- a/ext/tensor/vector.zep.c +++ b/ext/tensor/vector.zep.c @@ -3117,20 +3117,21 @@ PHP_METHOD(Tensor_Vector, deg2rad) */ PHP_METHOD(Tensor_Vector, sum) { - zval _0; + zval _0, _1; zephir_method_globals *ZEPHIR_METHOD_GLOBALS_PTR = NULL; zend_long ZEPHIR_LAST_CALL_STATUS; zval *this_ptr = getThis(); ZVAL_UNDEF(&_0); + ZVAL_UNDEF(&_1); ZEPHIR_MM_GROW(); zephir_read_property(&_0, this_ptr, ZEND_STRL("a"), PH_NOISY_CC | PH_READONLY); - ZEPHIR_RETURN_CALL_FUNCTION("array_sum", NULL, 17, &_0); + ZEPHIR_CALL_FUNCTION(&_1, "array_sum", NULL, 17, &_0); zephir_check_call_status(); - RETURN_MM(); + RETURN_MM_DOUBLE(zephir_get_doubleval(&_1)); } /** @@ -3140,20 +3141,21 @@ PHP_METHOD(Tensor_Vector, sum) */ PHP_METHOD(Tensor_Vector, product) { - zval _0; + zval _0, _1; zephir_method_globals *ZEPHIR_METHOD_GLOBALS_PTR = NULL; zend_long ZEPHIR_LAST_CALL_STATUS; zval *this_ptr = getThis(); ZVAL_UNDEF(&_0); + ZVAL_UNDEF(&_1); ZEPHIR_MM_GROW(); zephir_read_property(&_0, this_ptr, ZEND_STRL("a"), PH_NOISY_CC | PH_READONLY); - ZEPHIR_RETURN_CALL_FUNCTION("array_product", NULL, 18, &_0); + ZEPHIR_CALL_FUNCTION(&_1, "array_product", NULL, 18, &_0); zephir_check_call_status(); - RETURN_MM(); + RETURN_MM_DOUBLE(zephir_get_doubleval(&_1)); } /** @@ -3163,20 +3165,21 @@ PHP_METHOD(Tensor_Vector, product) */ PHP_METHOD(Tensor_Vector, min) { - zval _0; + zval _0, _1; zephir_method_globals *ZEPHIR_METHOD_GLOBALS_PTR = NULL; zend_long ZEPHIR_LAST_CALL_STATUS; zval *this_ptr = getThis(); ZVAL_UNDEF(&_0); + ZVAL_UNDEF(&_1); ZEPHIR_MM_GROW(); zephir_read_property(&_0, this_ptr, ZEND_STRL("a"), PH_NOISY_CC | PH_READONLY); - ZEPHIR_RETURN_CALL_FUNCTION("min", NULL, 19, &_0); + ZEPHIR_CALL_FUNCTION(&_1, "min", NULL, 19, &_0); zephir_check_call_status(); - RETURN_MM(); + RETURN_MM_DOUBLE(zephir_get_doubleval(&_1)); } /** diff --git a/tensor/matrix.zep b/tensor/matrix.zep index bb41c14..1bd450b 100644 --- a/tensor/matrix.zep +++ b/tensor/matrix.zep @@ -3212,9 +3212,9 @@ class 
Matrix implements Tensor * * @param mixed index * @throws \Tensor\Exceptions\InvalidArgumentException - * @return array + * @return \Tensor\Vector */ - public function offsetGet(const var index) -> array + public function offsetGet(const var index) -> <\Tensor\Vector> { var row; diff --git a/tensor/vector.zep b/tensor/vector.zep index 7ed2715..044cf13 100644 --- a/tensor/vector.zep +++ b/tensor/vector.zep @@ -1153,7 +1153,7 @@ class Vector implements Tensor */ public function sum() -> float { - return array_sum(this->a); + return (float) array_sum(this->a); } /** @@ -1163,7 +1163,7 @@ class Vector implements Tensor */ public function product() -> float { - return array_product(this->a); + return (float) array_product(this->a); } /** @@ -1173,7 +1173,7 @@ class Vector implements Tensor */ public function min() -> float { - return min(this->a); + return (float) min(this->a); } /** diff --git a/tests/MatrixTest.php b/tests/MatrixTest.php index 4fbc7b0..8683ef7 100644 --- a/tests/MatrixTest.php +++ b/tests/MatrixTest.php @@ -475,8 +475,8 @@ public function pseudoinverse() : void $expected = Matrix::quick([ [0.03147992432205172, 0.05583000490505223], - [-0.009144418751313844, 0.0700371382524], - [0.012665545511877228, -0.00313572980169575], + [-0.009144418751313844, 0.07003713825239999], + [0.01266554551187723, -0.0031357298016957483], ]); $this->assertEquals($expected, $b); @@ -2320,7 +2320,7 @@ public function quantile() : void $b = $a->quantile(0.4); - $expected = ColumnVector::quick([6.200000000000003, 2.8000000000000007, -6.6]); + $expected = ColumnVector::quick([6.200000000000001, 2.8000000000000003, -6.6]); $this->assertEquals($expected, $b); } diff --git a/tests/VectorTest.php b/tests/VectorTest.php index 52dd7f0..9421d45 100644 --- a/tests/VectorTest.php +++ b/tests/VectorTest.php @@ -402,8 +402,8 @@ public function convolve() : void $c = $a->convolve($b, 1); $expected = Vector::quick([ - -60, 2.5, 259, -144, 40.5, 370.1, 462.20000000000005, - 10, 1764.3000000000002, 1625.1, 
2234.7000000000003, 1378.4, 535.5, + -60.0, 2.5, 259.0, -144.0, 40.5, 370.1, 462.20000000000005, + 10.000000000000114, 1764.3000000000002, 1625.1, 2234.7, 1378.4, 535.5, ]); $this->assertEquals($expected, $c);