Skip to content

Added zend_simd.h #18413

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 10 commits into
base: master
Choose a base branch
from
83 changes: 83 additions & 0 deletions Zend/zend_simd.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
/*
   +----------------------------------------------------------------------+
   | Zend Engine                                                          |
   +----------------------------------------------------------------------+
   | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
   +----------------------------------------------------------------------+
   | This source file is subject to version 2.00 of the Zend license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.zend.com/license/2_00.txt.                                |
   | If you did not receive a copy of the Zend license and are unable to  |
   | obtain it through the world-wide-web, please send a note to          |
   | [email protected] so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: Saki Takamachi <[email protected]>                             |
   +----------------------------------------------------------------------+
*/

#ifndef ZEND_SIMD_H
#define ZEND_SIMD_H

/*
 * Portable 128-bit SIMD support.
 *
 * On x86 targets with SSE2 the native intrinsics from <emmintrin.h> are used
 * directly. On AArch64 the subset of the SSE2 intrinsic API used by the
 * engine is emulated on top of NEON, so call sites can be written once
 * against the SSE2 names. ZEND_HAVE_VECTOR_128 is defined whenever 128-bit
 * vector operations are available.
 *
 * NOTE(review): zend_always_inline is assumed to already be in scope (it is
 * provided by zend_portability.h via the usual Zend headers) — confirm every
 * includer pulls that in first.
 */

#ifdef __SSE2__
#include <emmintrin.h>
#define ZEND_HAVE_VECTOR_128


#elif defined(__aarch64__) || defined(_M_ARM64)
#include <arm_neon.h>
#define ZEND_HAVE_VECTOR_128

typedef int8x16_t __m128i;

#define _mm_setzero_si128() vdupq_n_s8(0)
#define _mm_set1_epi8(x) vdupq_n_s8(x)
/* As with the SSE2 originals, the _mm_set_* arguments run from the most
 * significant element down to the least significant one, hence the reversed
 * order in the NEON initializer lists below. */
#define _mm_set_epi16(x0, x1, x2, x3, x4, x5, x6, x7) \
	vreinterpretq_s8_s16((int16x8_t) { \
		(int16_t) (x7), (int16_t) (x6), (int16_t) (x5), (int16_t) (x4), \
		(int16_t) (x3), (int16_t) (x2), (int16_t) (x1), (int16_t) (x0) })
#define _mm_set_epi32(x0, x1, x2, x3) \
	vreinterpretq_s8_s32((int32x4_t) { (int32_t) (x3), (int32_t) (x2), (int32_t) (x1), (int32_t) (x0) })
#define _mm_set_epi64x(x0, x1) vreinterpretq_s8_s64((int64x2_t) { (int64_t) (x1), (int64_t) (x0) })
/* vld1q_s8()/vst1q_s8() have no alignment requirement, so the aligned and
 * unaligned variants can share a single implementation. */
#define _mm_load_si128(x) vld1q_s8((const int8_t *) (x))
#define _mm_loadu_si128(x) _mm_load_si128(x)
#define _mm_store_si128(to, x) vst1q_s8((int8_t *) (to), x)
#define _mm_storeu_si128(to, x) _mm_store_si128(to, x)

#define _mm_or_si128(a, b) vorrq_s8(a, b)
#define _mm_xor_si128(a, b) veorq_s8(a, b)
#define _mm_and_si128(a, b) vandq_s8(a, b)

/* Whole-register byte shifts, matching SSE2 semantics: shift by (imm) bytes,
 * all-zero result for counts of 16 or more.
 *
 * The last operand of vextq_u8() must be a compile-time constant in the
 * range [0, 15], and compilers diagnose an out-of-range immediate even in
 * the arm of the conditional that is never taken. The explicit (imm) <= 0
 * arm and the "& 0xf" masking keep the immediate in range for every constant
 * shift count: without them, _mm_slli_si128(x, 0) (immediate 16) and any
 * count >= 16 would fail to compile. */
#define _mm_slli_si128(x, imm) \
	((imm) <= 0 ? (x) : (imm) >= 16 ? vdupq_n_s8(0) : \
		vreinterpretq_s8_u8(vextq_u8(vdupq_n_u8(0), vreinterpretq_u8_s8(x), (16 - (imm)) & 0xf)))
#define _mm_srli_si128(x, imm) \
	((imm) <= 0 ? (x) : (imm) >= 16 ? vdupq_n_s8(0) : \
		vreinterpretq_s8_u8(vextq_u8(vreinterpretq_u8_s8(x), vdupq_n_u8(0), (imm) & 0xf)))

/**
 * In practice, there is no problem, but a runtime error for signed integer overflow is triggered by UBSAN,
 * so perform the calculation as unsigned. Since it is optimized at compile time, there are no unnecessary casts at runtime.
 */
#define _mm_add_epi8(a, b) vreinterpretq_s8_u8(vaddq_u8(vreinterpretq_u8_s8(a), vreinterpretq_u8_s8(b)))

/* NEON comparisons yield all-ones/all-zero unsigned lanes; reinterpret the
 * result back to the signed element type used by this emulation. */
#define _mm_cmpeq_epi8(a, b) (vreinterpretq_s8_u8(vceqq_s8(a, b)))
#define _mm_cmplt_epi8(a, b) (vreinterpretq_s8_u8(vcltq_s8(a, b)))
#define _mm_cmpgt_epi8(a, b) (vreinterpretq_s8_u8(vcgtq_s8(a, b)))

/* Collect the most significant bit of each of the 16 bytes of x into bits
 * 0..15 of the result, exactly like the SSE2 PMOVMSKB instruction. */
static zend_always_inline int _mm_movemask_epi8(__m128i x)
{
	/**
	 * based on code from
	 * https://community.arm.com/arm-community-blogs/b/servers-and-cloud-computing-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon
	 */
	uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(vreinterpretq_u8_s8(x), 7));
	uint32x4_t paired16 = vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7));
	uint64x2_t paired32 = vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14));
	uint8x16_t paired64 = vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28));
	return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8);
}

#endif

#endif /* ZEND_SIMD_H */
20 changes: 10 additions & 10 deletions ext/bcmath/libbcmath/src/convert.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,22 +17,22 @@
#include "bcmath.h"
#include "convert.h"
#include "private.h"
#include "simd.h"
#include "zend_simd.h"

char *bc_copy_and_toggle_bcd(char *restrict dest, const char *source, const char *source_end)
{
const size_t bulk_shift = SWAR_REPEAT('0');

#ifdef HAVE_BC_SIMD_128
#ifdef ZEND_HAVE_VECTOR_128
/* SIMD SSE2 or NEON bulk shift + copy */
bc_simd_128_t shift_vector = bc_simd_set_8x16('0');
while (source + sizeof(bc_simd_128_t) <= source_end) {
bc_simd_128_t bytes = bc_simd_load_8x16((const bc_simd_128_t *) source);
bytes = bc_simd_xor_8x16(bytes, shift_vector);
bc_simd_store_8x16((bc_simd_128_t *) dest, bytes);

source += sizeof(bc_simd_128_t);
dest += sizeof(bc_simd_128_t);
__m128i shift_vector = _mm_set1_epi8('0');
while (source + sizeof(__m128i) <= source_end) {
__m128i bytes = _mm_loadu_si128((const __m128i *) source);
bytes = _mm_xor_si128(bytes, shift_vector);
_mm_storeu_si128((__m128i *) dest, bytes);

source += sizeof(__m128i);
dest += sizeof(__m128i);
}
#endif

Expand Down
59 changes: 0 additions & 59 deletions ext/bcmath/libbcmath/src/simd.h

This file was deleted.

36 changes: 18 additions & 18 deletions ext/bcmath/libbcmath/src/str2num.c
Original file line number Diff line number Diff line change
Expand Up @@ -32,28 +32,28 @@
#include "bcmath.h"
#include "convert.h"
#include "private.h"
#include "simd.h"
#include "zend_simd.h"
#include <stdbool.h>
#include <stddef.h>

/* Convert strings to bc numbers. Base 10 only.*/
static inline const char *bc_count_digits(const char *str, const char *end)
{
/* Process in bulk */
#ifdef HAVE_BC_SIMD_128
const bc_simd_128_t offset = bc_simd_set_8x16((signed char) (SCHAR_MIN - '0'));
#ifdef ZEND_HAVE_VECTOR_128
const __m128i offset = _mm_set1_epi8((signed char) (SCHAR_MIN - '0'));
/* we use the less than comparator, so add 1 */
const bc_simd_128_t threshold = bc_simd_set_8x16(SCHAR_MIN + ('9' + 1 - '0'));
const __m128i threshold = _mm_set1_epi8(SCHAR_MIN + ('9' + 1 - '0'));

while (str + sizeof(bc_simd_128_t) <= end) {
bc_simd_128_t bytes = bc_simd_load_8x16((const bc_simd_128_t *) str);
while (str + sizeof(__m128i) <= end) {
__m128i bytes = _mm_loadu_si128((const __m128i *) str);
/* Wrapping-add the offset to the bytes, such that all bytes below '0' are positive and others are negative.
* More specifically, '0' will be -128 and '9' will be -119. */
bytes = bc_simd_add_8x16(bytes, offset);
bytes = _mm_add_epi8(bytes, offset);
/* Now mark all bytes that are <= '9', i.e. <= -119, i.e. < -118, i.e. the threshold. */
bytes = bc_simd_cmplt_8x16(bytes, threshold);
bytes = _mm_cmplt_epi8(bytes, threshold);

int mask = bc_simd_movemask_8x16(bytes);
int mask = _mm_movemask_epi8(bytes);
if (mask != 0xffff) {
/* At least one of the bytes is not within range. Move to the first offending byte. */
#ifdef PHP_HAVE_BUILTIN_CTZL
Expand All @@ -63,7 +63,7 @@ static inline const char *bc_count_digits(const char *str, const char *end)
#endif
}

str += sizeof(bc_simd_128_t);
str += sizeof(__m128i);
}
#endif

Expand All @@ -77,19 +77,19 @@ static inline const char *bc_count_digits(const char *str, const char *end)
static inline const char *bc_skip_zero_reverse(const char *scanner, const char *stop)
{
/* Check in bulk */
#ifdef HAVE_BC_SIMD_128
const bc_simd_128_t c_zero_repeat = bc_simd_set_8x16('0');
while (scanner - sizeof(bc_simd_128_t) >= stop) {
scanner -= sizeof(bc_simd_128_t);
bc_simd_128_t bytes = bc_simd_load_8x16((const bc_simd_128_t *) scanner);
#ifdef ZEND_HAVE_VECTOR_128
const __m128i c_zero_repeat = _mm_set1_epi8('0');
while (scanner - sizeof(__m128i) >= stop) {
scanner -= sizeof(__m128i);
__m128i bytes = _mm_loadu_si128((const __m128i *) scanner);
/* Checks if all numeric strings are equal to '0'. */
bytes = bc_simd_cmpeq_8x16(bytes, c_zero_repeat);
bytes = _mm_cmpeq_epi8(bytes, c_zero_repeat);

int mask = bc_simd_movemask_8x16(bytes);
int mask = _mm_movemask_epi8(bytes);
/* The probability of having 16 trailing 0s in a row is very low, so we use EXPECTED. */
if (EXPECTED(mask != 0xffff)) {
/* Move the pointer back and check each character in loop. */
scanner += sizeof(bc_simd_128_t);
scanner += sizeof(__m128i);
break;
}
}
Expand Down
4 changes: 3 additions & 1 deletion ext/opcache/ZendAccelerator.c
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,8 @@ typedef int gid_t;
#include <immintrin.h>
#endif

#include "zend_simd.h"

ZEND_EXTENSION();

#ifndef ZTS
Expand Down Expand Up @@ -171,7 +173,7 @@ static void bzero_aligned(void *mem, size_t size)
_mm256_store_si256((__m256i*)(p+32), ymm0);
p += 64;
}
#elif defined(__SSE2__)
#elif defined(ZEND_HAVE_VECTOR_128)
char *p = (char*)mem;
char *end = p + size;
__m128i xmm0 = _mm_setzero_si128();
Expand Down
9 changes: 5 additions & 4 deletions ext/standard/string.c
Original file line number Diff line number Diff line change
Expand Up @@ -46,10 +46,11 @@
#include "ext/random/php_random.h"

#ifdef __SSE2__
#include <emmintrin.h>
#include "Zend/zend_bitset.h"
#endif

#include "zend_simd.h"

/* this is read-only, so it's ok */
ZEND_SET_ALIGNED(16, static const char hexconvtab[]) = "0123456789abcdef";

Expand Down Expand Up @@ -2817,7 +2818,7 @@ static zend_string *php_strtr_ex(zend_string *str, const char *str_from, const c
char *input = ZSTR_VAL(str);
size_t len = ZSTR_LEN(str);

#ifdef __SSE2__
#ifdef ZEND_HAVE_VECTOR_128
if (ZSTR_LEN(str) >= sizeof(__m128i)) {
__m128i search = _mm_set1_epi8(ch_from);
__m128i delta = _mm_set1_epi8(ch_to - ch_from);
Expand Down Expand Up @@ -3037,7 +3038,7 @@ static zend_always_inline zend_long count_chars(const char *p, zend_long length,
zend_long count = 0;
const char *endp;

#ifdef __SSE2__
#ifdef ZEND_HAVE_VECTOR_128
if (length >= sizeof(__m128i)) {
__m128i search = _mm_set1_epi8(ch);

Expand Down Expand Up @@ -5835,7 +5836,7 @@ static zend_string *php_str_rot13(zend_string *str)
e = p + ZSTR_LEN(str);
target = ZSTR_VAL(ret);

#ifdef __SSE2__
#ifdef ZEND_HAVE_VECTOR_128
if (e - p > 15) {
const __m128i a_minus_1 = _mm_set1_epi8('a' - 1);
const __m128i m_plus_1 = _mm_set1_epi8('m' + 1);
Expand Down
7 changes: 2 additions & 5 deletions ext/standard/url.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,11 @@
#include <ctype.h>
#include <sys/types.h>

#ifdef __SSE2__
#include <emmintrin.h>
#endif

#include "php.h"

#include "url.h"
#include "file.h"
#include "zend_simd.h"

/* {{{ free_url */
PHPAPI void php_url_free(php_url *theurl)
Expand Down Expand Up @@ -460,7 +457,7 @@ static zend_always_inline zend_string *php_url_encode_impl(const char *s, size_t
start = zend_string_safe_alloc(3, len, 0, 0);
to = (unsigned char*)ZSTR_VAL(start);

#ifdef __SSE2__
#ifdef ZEND_HAVE_VECTOR_128
while (from + 16 < end) {
__m128i mask;
uint32_t bits;
Expand Down