Commit 13e3b51dad for aom
commit 13e3b51dad232c8b78ba4d9c96e7775b5a03ae3a
Author: Gerda Zsejke More <gerdazsejke.more@arm.com>
Date: Wed Nov 26 11:09:34 2025 +0100
Optimize Neon HBD sadx3d functions
Optimize aom_highbd_sadx3d_neon functions by accumulating into 16-bit
vectors and widening to 32 bits only just before the 16-bit accumulators
would overflow.
Change-Id: I4aed8c598b704e8356c705511dec39444c99c16b
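The core pattern, as a standalone sketch (not part of the commit): keep the
running SAD in 16-bit lanes via vabaq_u16 and fold into a 32-bit accumulator
with vpadalq_u16 only once per overflow-safe batch of rows. This assumes
AArch64 Neon, 12-bit samples (absolute differences of at most 4095), and
hypothetical names; the real kernels below also handle multiple refs and
wider blocks.

    #include <arm_neon.h>
    #include <stdint.h>

    // Illustrative 8-wide SAD over h rows with deferred widening.
    static uint32_t sad8xh_sketch(const uint16_t *src, const uint16_t *ref,
                                  int stride, int h) {
      uint32x4_t sum_u32 = vdupq_n_u32(0);
      while (h > 0) {
        // 16 * 4095 = 65520 <= 65535, so 16 rows fit in a 16-bit lane.
        const int h_limit = h < 16 ? h : 16;
        uint16x8_t sum_u16 = vdupq_n_u16(0);
        for (int i = 0; i < h_limit; i++) {
          // Accumulate |src - ref| per lane in 16 bits.
          sum_u16 = vabaq_u16(sum_u16, vld1q_u16(src), vld1q_u16(ref));
          src += stride;
          ref += stride;
        }
        // Pairwise-widen into the 32-bit accumulator before the 16-bit
        // lanes can overflow.
        sum_u32 = vpadalq_u16(sum_u32, sum_u16);
        h -= h_limit;
      }
      return vaddvq_u32(sum_u32);  // horizontal add of the 32-bit lanes
    }

Compared with widening every row through sad8_neon (which accumulates into
uint32x4_t), this defers the widening adds to once per batch.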
diff --git a/aom_dsp/arm/highbd_sadxd_neon.c b/aom_dsp/arm/highbd_sadxd_neon.c
index 0e43f6dc34..bdf7a00b70 100644
--- a/aom_dsp/arm/highbd_sadxd_neon.c
+++ b/aom_dsp/arm/highbd_sadxd_neon.c
@@ -352,16 +352,14 @@ HBD_SAD_SKIP_WXH_4D_LARGE_NEON(16, 64)
HBD_SAD_SKIP_WXH_4D_LARGE_NEON(64, 16)
#endif // !CONFIG_REALTIME_ONLY
-static inline void highbd_sad4xhx3d_small_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *const ref_ptr[4],
- int ref_stride, uint32_t res[4],
- int h) {
+static inline void highbd_sad4xhx3d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_ptr[4],
+ int ref_stride, uint32_t res[4],
+ int h) {
const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
const uint16_t *ref16_ptr0 = CONVERT_TO_SHORTPTR(ref_ptr[0]);
const uint16_t *ref16_ptr1 = CONVERT_TO_SHORTPTR(ref_ptr[1]);
const uint16_t *ref16_ptr2 = CONVERT_TO_SHORTPTR(ref_ptr[2]);
-
uint32x4_t sum[3] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0) };
int i = 0;
@@ -382,227 +380,198 @@ static inline void highbd_sad4xhx3d_small_neon(const uint8_t *src_ptr,
res[2] = horizontal_add_u32x4(sum[2]);
}
-static inline void highbd_sad8xhx3d_small_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *const ref_ptr[4],
- int ref_stride, uint32_t res[4],
- int h) {
+static inline void highbd_sad8xhx3d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_ptr[4],
+ int ref_stride, uint32_t res[4],
+ int h) {
const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
const uint16_t *ref16_ptr0 = CONVERT_TO_SHORTPTR(ref_ptr[0]);
const uint16_t *ref16_ptr1 = CONVERT_TO_SHORTPTR(ref_ptr[1]);
const uint16_t *ref16_ptr2 = CONVERT_TO_SHORTPTR(ref_ptr[2]);
- uint16x8_t sum[3] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0) };
+ // 'h_overflow' is the number of 8-wide rows we can process before 16-bit
+ // accumulators overflow. After hitting this limit, accumulate into 32-bit
+ // elements. 65535 / 4095 ~= 16, so 16 8-wide rows.
+ const int h_overflow = 16;
+ // If block height 'h' is smaller than this limit, use 'h' instead.
+ const int h_limit = h < h_overflow ? h : h_overflow;
+ assert(h % h_limit == 0);
- int i = 0;
- do {
- uint16x8_t s = vld1q_u16(src16_ptr + i * src_stride);
-
- sum[0] = vabaq_u16(sum[0], s, vld1q_u16(ref16_ptr0 + i * ref_stride));
- sum[1] = vabaq_u16(sum[1], s, vld1q_u16(ref16_ptr1 + i * ref_stride));
- sum[2] = vabaq_u16(sum[2], s, vld1q_u16(ref16_ptr2 + i * ref_stride));
-
- } while (++i < h);
-
- res[0] = horizontal_add_u32x4(vpaddlq_u16(sum[0]));
- res[1] = horizontal_add_u32x4(vpaddlq_u16(sum[1]));
- res[2] = horizontal_add_u32x4(vpaddlq_u16(sum[2]));
-}
-
-#if !CONFIG_REALTIME_ONLY
-static inline void highbd_sad8xhx3d_large_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *const ref_ptr[4],
- int ref_stride, uint32_t res[4],
- int h) {
- const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
- const uint16_t *ref16_ptr0 = CONVERT_TO_SHORTPTR(ref_ptr[0]);
- const uint16_t *ref16_ptr1 = CONVERT_TO_SHORTPTR(ref_ptr[1]);
- const uint16_t *ref16_ptr2 = CONVERT_TO_SHORTPTR(ref_ptr[2]);
-
- uint32x4_t sum[3] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0) };
+ uint32x4_t sum_u32[3] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0) };
+ int h_tmp = h_limit;
int i = 0;
do {
- uint16x8_t s = vld1q_u16(src16_ptr + i * src_stride);
- uint16x8_t r0 = vld1q_u16(ref16_ptr0 + i * ref_stride);
- uint16x8_t r1 = vld1q_u16(ref16_ptr1 + i * ref_stride);
- uint16x8_t r2 = vld1q_u16(ref16_ptr2 + i * ref_stride);
-
- sad8_neon(s, r0, &sum[0]);
- sad8_neon(s, r1, &sum[1]);
- sad8_neon(s, r2, &sum[2]);
-
- } while (++i < h);
-
- res[0] = horizontal_add_u32x4(sum[0]);
- res[1] = horizontal_add_u32x4(sum[1]);
- res[2] = horizontal_add_u32x4(sum[2]);
+ uint16x8_t sum_u16[3] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0) };
+ do {
+ uint16x8_t s0 = vld1q_u16(src16_ptr + i * src_stride);
+
+ sum_u16[0] =
+ vabaq_u16(sum_u16[0], s0, vld1q_u16(ref16_ptr0 + i * ref_stride));
+ sum_u16[1] =
+ vabaq_u16(sum_u16[1], s0, vld1q_u16(ref16_ptr1 + i * ref_stride));
+ sum_u16[2] =
+ vabaq_u16(sum_u16[2], s0, vld1q_u16(ref16_ptr2 + i * ref_stride));
+ } while (++i < h_tmp);
+
+ sum_u32[0] = vpadalq_u16(sum_u32[0], sum_u16[0]);
+ sum_u32[1] = vpadalq_u16(sum_u32[1], sum_u16[1]);
+ sum_u32[2] = vpadalq_u16(sum_u32[2], sum_u16[2]);
+
+ h_tmp += h_limit;
+ h -= h_limit;
+ } while (h != 0);
+
+ res[0] = horizontal_add_u32x4(sum_u32[0]);
+ res[1] = horizontal_add_u32x4(sum_u32[1]);
+ res[2] = horizontal_add_u32x4(sum_u32[2]);
}
-#endif // !CONFIG_REALTIME_ONLY
-static inline void highbd_sad16xhx3d_large_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *const ref_ptr[4],
- int ref_stride, uint32_t res[4],
- int h) {
+static inline void highbd_sadwxhx3d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_ptr[4],
+ int ref_stride, uint32_t res[4], int w,
+ int h, const int h_overflow) {
const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
const uint16_t *ref16_ptr0 = CONVERT_TO_SHORTPTR(ref_ptr[0]);
const uint16_t *ref16_ptr1 = CONVERT_TO_SHORTPTR(ref_ptr[1]);
const uint16_t *ref16_ptr2 = CONVERT_TO_SHORTPTR(ref_ptr[2]);
+ uint32x4_t sum_u32[3] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0) };
- uint32x4_t sum_lo[3] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0) };
- uint32x4_t sum_hi[3] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0) };
+ const int h_limit = h < h_overflow ? h : h_overflow;
+ assert(h % h_limit == 0);
- int i = 0;
do {
- uint16x8_t s0 = vld1q_u16(src16_ptr + i * src_stride);
- sad8_neon(s0, vld1q_u16(ref16_ptr0 + i * ref_stride), &sum_lo[0]);
- sad8_neon(s0, vld1q_u16(ref16_ptr1 + i * ref_stride), &sum_lo[1]);
- sad8_neon(s0, vld1q_u16(ref16_ptr2 + i * ref_stride), &sum_lo[2]);
-
- uint16x8_t s1 = vld1q_u16(src16_ptr + i * src_stride + 8);
- sad8_neon(s1, vld1q_u16(ref16_ptr0 + i * ref_stride + 8), &sum_hi[0]);
- sad8_neon(s1, vld1q_u16(ref16_ptr1 + i * ref_stride + 8), &sum_hi[1]);
- sad8_neon(s1, vld1q_u16(ref16_ptr2 + i * ref_stride + 8), &sum_hi[2]);
-
- } while (++i < h);
-
- res[0] = horizontal_add_u32x4(vaddq_u32(sum_lo[0], sum_hi[0]));
- res[1] = horizontal_add_u32x4(vaddq_u32(sum_lo[1], sum_hi[1]));
- res[2] = horizontal_add_u32x4(vaddq_u32(sum_lo[2], sum_hi[2]));
-}
-
-static inline void highbd_sadwxhx3d_large_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *const ref_ptr[4],
- int ref_stride, uint32_t res[4],
- int w, int h) {
- const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
- const uint16_t *ref16_ptr0 = CONVERT_TO_SHORTPTR(ref_ptr[0]);
- const uint16_t *ref16_ptr1 = CONVERT_TO_SHORTPTR(ref_ptr[1]);
- const uint16_t *ref16_ptr2 = CONVERT_TO_SHORTPTR(ref_ptr[2]);
+ uint16x8_t sum_u16[3] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0) };
- uint32x4_t sum_lo[3] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0) };
- uint32x4_t sum_hi[3] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0) };
- uint32x4_t sum[3];
-
- int i = 0;
- do {
- int j = 0;
+ int i = h_limit;
do {
- uint16x8_t s0 = vld1q_u16(src16_ptr + i * src_stride + j);
- sad8_neon(s0, vld1q_u16(ref16_ptr0 + i * ref_stride + j), &sum_lo[0]);
- sad8_neon(s0, vld1q_u16(ref16_ptr1 + i * ref_stride + j), &sum_lo[1]);
- sad8_neon(s0, vld1q_u16(ref16_ptr2 + i * ref_stride + j), &sum_lo[2]);
-
- uint16x8_t s1 = vld1q_u16(src16_ptr + i * src_stride + j + 8);
- sad8_neon(s1, vld1q_u16(ref16_ptr0 + i * ref_stride + j + 8), &sum_hi[0]);
- sad8_neon(s1, vld1q_u16(ref16_ptr1 + i * ref_stride + j + 8), &sum_hi[1]);
- sad8_neon(s1, vld1q_u16(ref16_ptr2 + i * ref_stride + j + 8), &sum_hi[2]);
-
- uint16x8_t s2 = vld1q_u16(src16_ptr + i * src_stride + j + 16);
- sad8_neon(s2, vld1q_u16(ref16_ptr0 + i * ref_stride + j + 16),
- &sum_lo[0]);
- sad8_neon(s2, vld1q_u16(ref16_ptr1 + i * ref_stride + j + 16),
- &sum_lo[1]);
- sad8_neon(s2, vld1q_u16(ref16_ptr2 + i * ref_stride + j + 16),
- &sum_lo[2]);
-
- uint16x8_t s3 = vld1q_u16(src16_ptr + i * src_stride + j + 24);
- sad8_neon(s3, vld1q_u16(ref16_ptr0 + i * ref_stride + j + 24),
- &sum_hi[0]);
- sad8_neon(s3, vld1q_u16(ref16_ptr1 + i * ref_stride + j + 24),
- &sum_hi[1]);
- sad8_neon(s3, vld1q_u16(ref16_ptr2 + i * ref_stride + j + 24),
- &sum_hi[2]);
-
- j += 32;
- } while (j < w);
-
- } while (++i < h);
-
- sum[0] = vaddq_u32(sum_lo[0], sum_hi[0]);
- sum[1] = vaddq_u32(sum_lo[1], sum_hi[1]);
- sum[2] = vaddq_u32(sum_lo[2], sum_hi[2]);
-
- res[0] = horizontal_add_u32x4(sum[0]);
- res[1] = horizontal_add_u32x4(sum[1]);
- res[2] = horizontal_add_u32x4(sum[2]);
+ int j = 0;
+ do {
+ uint16x8_t s0 = vld1q_u16(src16_ptr + j);
+
+ sum_u16[0] = vabaq_u16(sum_u16[0], s0, vld1q_u16(ref16_ptr0 + j));
+ sum_u16[1] = vabaq_u16(sum_u16[1], s0, vld1q_u16(ref16_ptr1 + j));
+ sum_u16[2] = vabaq_u16(sum_u16[2], s0, vld1q_u16(ref16_ptr2 + j));
+
+ uint16x8_t s1 = vld1q_u16(src16_ptr + j + 8);
+ sum_u16[0] = vabaq_u16(sum_u16[0], s1, vld1q_u16(ref16_ptr0 + j + 8));
+ sum_u16[1] = vabaq_u16(sum_u16[1], s1, vld1q_u16(ref16_ptr1 + j + 8));
+ sum_u16[2] = vabaq_u16(sum_u16[2], s1, vld1q_u16(ref16_ptr2 + j + 8));
+
+ j += 16;
+ } while (j < w);
+
+ src16_ptr += src_stride;
+ ref16_ptr0 += ref_stride;
+ ref16_ptr1 += ref_stride;
+ ref16_ptr2 += ref_stride;
+ } while (--i != 0);
+
+ sum_u32[0] = vpadalq_u16(sum_u32[0], sum_u16[0]);
+ sum_u32[1] = vpadalq_u16(sum_u32[1], sum_u16[1]);
+ sum_u32[2] = vpadalq_u16(sum_u32[2], sum_u16[2]);
+
+ h -= h_limit;
+ } while (h != 0);
+
+ res[0] = horizontal_add_u32x4(sum_u32[0]);
+ res[1] = horizontal_add_u32x4(sum_u32[1]);
+ res[2] = horizontal_add_u32x4(sum_u32[2]);
}
-static inline void highbd_sad128xhx3d_large_neon(
- const uint8_t *src_ptr, int src_stride, const uint8_t *const ref_ptr[4],
- int ref_stride, uint32_t res[4], int h) {
- highbd_sadwxhx3d_large_neon(src_ptr, src_stride, ref_ptr, ref_stride, res,
- 128, h);
+static inline void highbd_sad16xhx3d_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *const ref_ptr[4],
+ int ref_stride, uint32_t res[4],
+ int h) {
+ // 'h_overflow' is the number of 16-wide rows we can process before 16-bit
+ // accumulators overflow. After hitting this limit, accumulate into 32-bit
+ // elements. 65535 / 4095 ~= 16 additions per lane, and a 16-wide row adds
+ // two per lane, so 8 16-wide rows.
+ const int h_overflow = 8;
+ highbd_sadwxhx3d_neon(src_ptr, src_stride, ref_ptr, ref_stride, res, 16, h,
+ h_overflow);
}
-static inline void highbd_sad64xhx3d_large_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *const ref_ptr[4],
- int ref_stride, uint32_t res[4],
- int h) {
- highbd_sadwxhx3d_large_neon(src_ptr, src_stride, ref_ptr, ref_stride, res, 64,
- h);
+static inline void highbd_sad32xhx3d_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *const ref_ptr[4],
+ int ref_stride, uint32_t res[4],
+ int h) {
+ // 'h_overflow' is the number of 32-wide rows we can process before 16-bit
+ // accumulators overflow. After hitting this limit, accumulate into 32-bit
+ // elements. 65535 / 4095 ~= 16 additions per lane, and a 32-wide row adds
+ // four per lane, so 4 32-wide rows.
+ const int h_overflow = 4;
+ highbd_sadwxhx3d_neon(src_ptr, src_stride, ref_ptr, ref_stride, res, 32, h,
+ h_overflow);
}
-static inline void highbd_sad32xhx3d_large_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *const ref_ptr[4],
- int ref_stride, uint32_t res[4],
- int h) {
- highbd_sadwxhx3d_large_neon(src_ptr, src_stride, ref_ptr, ref_stride, res, 32,
- h);
+static inline void highbd_sad64xhx3d_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *const ref_ptr[4],
+ int ref_stride, uint32_t res[4],
+ int h) {
+ // 'h_overflow' is the number of 64-wide rows we can process before 16-bit
+ // accumulators overflow. After hitting this limit, accumulate into 32-bit
+ // elements. 65535 / 4095 ~= 16 additions per lane, and a 64-wide row adds
+ // eight per lane, so 2 64-wide rows.
+ const int h_overflow = 2;
+ highbd_sadwxhx3d_neon(src_ptr, src_stride, ref_ptr, ref_stride, res, 64, h,
+ h_overflow);
}
-#define HBD_SAD_WXH_3D_SMALL_NEON(w, h) \
- void aom_highbd_sad##w##x##h##x3d_neon( \
- const uint8_t *src, int src_stride, const uint8_t *const ref_array[4], \
- int ref_stride, uint32_t sad_array[4]) { \
- highbd_sad##w##xhx3d_small_neon(src, src_stride, ref_array, ref_stride, \
- sad_array, (h)); \
- }
+static inline void highbd_sad128xhx3d_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *const ref_ptr[4],
+ int ref_stride, uint32_t res[4],
+ int h) {
+ // 'h_overflow' is the number of 128-wide rows we can process before 16-bit
+ // accumulators overflow. After hitting this limit, accumulate into 32-bit
+ // elements. 65535 / 4095 ~= 16 additions per lane, and a 128-wide row adds
+ // 16 per lane, so 1 128-wide row.
+ const int h_overflow = 1;
+ highbd_sadwxhx3d_neon(src_ptr, src_stride, ref_ptr, ref_stride, res, 128, h,
+ h_overflow);
+}
-#define HBD_SAD_WXH_3D_LARGE_NEON(w, h) \
+#define HBD_SAD_WXH_3D_NEON(w, h) \
void aom_highbd_sad##w##x##h##x3d_neon( \
const uint8_t *src, int src_stride, const uint8_t *const ref_array[4], \
int ref_stride, uint32_t sad_array[4]) { \
- highbd_sad##w##xhx3d_large_neon(src, src_stride, ref_array, ref_stride, \
- sad_array, (h)); \
+ highbd_sad##w##xhx3d_neon(src, src_stride, ref_array, ref_stride, \
+ sad_array, (h)); \
}
-HBD_SAD_WXH_3D_SMALL_NEON(4, 4)
-HBD_SAD_WXH_3D_SMALL_NEON(4, 8)
+HBD_SAD_WXH_3D_NEON(4, 4)
+HBD_SAD_WXH_3D_NEON(4, 8)
-HBD_SAD_WXH_3D_SMALL_NEON(8, 4)
-HBD_SAD_WXH_3D_SMALL_NEON(8, 8)
-HBD_SAD_WXH_3D_SMALL_NEON(8, 16)
+HBD_SAD_WXH_3D_NEON(8, 4)
+HBD_SAD_WXH_3D_NEON(8, 8)
+HBD_SAD_WXH_3D_NEON(8, 16)
-HBD_SAD_WXH_3D_LARGE_NEON(16, 8)
-HBD_SAD_WXH_3D_LARGE_NEON(16, 16)
-HBD_SAD_WXH_3D_LARGE_NEON(16, 32)
+HBD_SAD_WXH_3D_NEON(16, 8)
+HBD_SAD_WXH_3D_NEON(16, 16)
+HBD_SAD_WXH_3D_NEON(16, 32)
-HBD_SAD_WXH_3D_LARGE_NEON(32, 16)
-HBD_SAD_WXH_3D_LARGE_NEON(32, 32)
-HBD_SAD_WXH_3D_LARGE_NEON(32, 64)
+HBD_SAD_WXH_3D_NEON(32, 16)
+HBD_SAD_WXH_3D_NEON(32, 32)
+HBD_SAD_WXH_3D_NEON(32, 64)
-HBD_SAD_WXH_3D_LARGE_NEON(64, 32)
-HBD_SAD_WXH_3D_LARGE_NEON(64, 64)
-HBD_SAD_WXH_3D_LARGE_NEON(64, 128)
+HBD_SAD_WXH_3D_NEON(64, 32)
+HBD_SAD_WXH_3D_NEON(64, 64)
+HBD_SAD_WXH_3D_NEON(64, 128)
-HBD_SAD_WXH_3D_LARGE_NEON(128, 64)
-HBD_SAD_WXH_3D_LARGE_NEON(128, 128)
+HBD_SAD_WXH_3D_NEON(128, 64)
+HBD_SAD_WXH_3D_NEON(128, 128)
#if !CONFIG_REALTIME_ONLY
-HBD_SAD_WXH_3D_SMALL_NEON(4, 16)
+HBD_SAD_WXH_3D_NEON(4, 16)
-HBD_SAD_WXH_3D_LARGE_NEON(8, 32)
+HBD_SAD_WXH_3D_NEON(8, 32)
-HBD_SAD_WXH_3D_LARGE_NEON(16, 4)
-HBD_SAD_WXH_3D_LARGE_NEON(16, 64)
+HBD_SAD_WXH_3D_NEON(16, 4)
+HBD_SAD_WXH_3D_NEON(16, 64)
-HBD_SAD_WXH_3D_LARGE_NEON(32, 8)
+HBD_SAD_WXH_3D_NEON(32, 8)
-HBD_SAD_WXH_3D_LARGE_NEON(64, 16)
+HBD_SAD_WXH_3D_NEON(64, 16)
#endif // !CONFIG_REALTIME_ONLY
+
+#undef HBD_SAD_WXH_3D_NEON
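For reference, every 'h_overflow' constant above follows from the same
bound. A hypothetical helper (not in the commit) that reproduces them,
assuming 12-bit input:

    // Rows of width w (a multiple of 8) that fit in a 16-bit lane: each
    // 8-wide vabaq_u16 adds one absolute difference (<= 4095) to every
    // lane, and a row of width w performs w / 8 such additions per lane.
    static int h_overflow_for_width(int w) {
      return (65535 / 4095) / (w / 8);  // 16, 8, 4, 2, 1 for w = 8..128
    }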