Commit 6f29cbd7e9 for aom
commit 6f29cbd7e9773ddc78251171ec6d2c0c7dafcb67
Author: Gerda Zsejke More <gerdazsejke.more@arm.com>
Date: Wed Nov 26 11:52:14 2025 +0100
Optimize Neon HBD sad and sad_skip functions
Optimize the aom_highbd_sad*_neon and aom_highbd_sad_skip*_neon functions
by accumulating absolute differences into 16-bit vectors, widening to
32-bit lanes only at the point where the 16-bit accumulators would
overflow.
Change-Id: I0835ad7263cb0e0213e9855fc8a3f03c17db2edc
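
The key arithmetic: with 12-bit input, each absolute difference is at most
2^12 - 1 = 4095, and a 16-bit lane holds at most 65535, so roughly
65535 / 4095 ~= 16 differences fit per lane before widening is required. A
minimal sketch of the resulting two-level loop for one 8-wide column,
assuming AArch64, 12-bit samples, and a single stride for both buffers
(sad8_rows_sketch is a hypothetical name, not part of this patch):

#include <arm_neon.h>
#include <stdint.h>

static uint32_t sad8_rows_sketch(const uint16_t *src, const uint16_t *ref,
                                 int stride, int h) {
  uint32x4_t sum_u32 = vdupq_n_u32(0);
  do {
    // Process at most 16 rows per 16-bit accumulator: 16 * 4095 <= 65535.
    int i = h < 16 ? h : 16;
    h -= i;
    uint16x8_t sum_u16 = vdupq_n_u16(0);
    do {
      // sum_u16 += |src - ref|, lane-wise (vabaq_u16: abs-diff-accumulate).
      sum_u16 = vabaq_u16(sum_u16, vld1q_u16(src), vld1q_u16(ref));
      src += stride;
      ref += stride;
    } while (--i != 0);
    // Widen into the 32-bit accumulator once per block of rows.
    sum_u32 = vpadalq_u16(sum_u32, sum_u16);
  } while (h != 0);
  return vaddvq_u32(sum_u32);  // Horizontal add (AArch64).
}

The patch applies this pattern below, shrinking the row budget as the block
width grows.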
diff --git a/aom_dsp/arm/highbd_sad_neon.c b/aom_dsp/arm/highbd_sad_neon.c
index 115aae8755..0487a325a6 100644
--- a/aom_dsp/arm/highbd_sad_neon.c
+++ b/aom_dsp/arm/highbd_sad_neon.c
@@ -19,15 +19,14 @@
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/sum_neon.h"
-static inline uint32_t highbd_sad4xh_small_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *ref_ptr,
- int ref_stride, int h) {
+static inline uint32_t highbd_sad4xh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h) {
const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
uint32x4_t sum = vdupq_n_u32(0);
- int i = h;
do {
uint16x4_t s = vld1_u16(src16_ptr);
uint16x4_t r = vld1_u16(ref16_ptr);
@@ -35,248 +34,220 @@ static inline uint32_t highbd_sad4xh_small_neon(const uint8_t *src_ptr,
src16_ptr += src_stride;
ref16_ptr += ref_stride;
- } while (--i != 0);
+ } while (--h != 0);
return horizontal_add_u32x4(sum);
}
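
For reference, the contract every kernel in this file implements is plain
SAD over 16-bit samples (the uint8_t pointers are CONVERT_TO_SHORTPTR'd to
uint16_t). A minimal scalar model, with a hypothetical name, for
illustration only:

#include <stdint.h>

static uint32_t highbd_sad_scalar_model(const uint16_t *src, int src_stride,
                                        const uint16_t *ref, int ref_stride,
                                        int w, int h) {
  uint32_t sad = 0;
  for (int i = 0; i < h; ++i) {
    for (int j = 0; j < w; ++j) {
      const int diff = src[j] - ref[j];
      sad += (uint32_t)(diff < 0 ? -diff : diff);
    }
    src += src_stride;
    ref += ref_stride;
  }
  return sad;
}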
-static inline uint32_t highbd_sad8xh_small_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *ref_ptr,
- int ref_stride, int h) {
+static inline uint32_t highbd_sad8xh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h) {
const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
- uint16x8_t sum = vdupq_n_u16(0);
-
- int i = h;
- do {
- uint16x8_t s = vld1q_u16(src16_ptr);
- uint16x8_t r = vld1q_u16(ref16_ptr);
- sum = vabaq_u16(sum, s, r);
-
- src16_ptr += src_stride;
- ref16_ptr += ref_stride;
- } while (--i != 0);
- return horizontal_add_u16x8(sum);
-}
+ // 'h_overflow' is the number of 8-wide rows we can process before 16-bit
+ // accumulators overflow. After hitting this limit, accumulate into 32-bit
+ // elements. 65535 / 4095 ~= 16, so 16 8-wide rows.
+ const int h_overflow = 16;
+ // If block height 'h' is smaller than this limit, use 'h' instead.
+ const int h_limit = h < h_overflow ? h : h_overflow;
+ assert(h % h_limit == 0);
-#if !CONFIG_REALTIME_ONLY
-static inline uint32_t highbd_sad8xh_large_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *ref_ptr,
- int ref_stride, int h) {
- const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
- const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
uint32x4_t sum_u32 = vdupq_n_u32(0);
- int i = h;
do {
- uint16x8_t s = vld1q_u16(src16_ptr);
- uint16x8_t r = vld1q_u16(ref16_ptr);
- uint16x8_t sum_u16 = vabdq_u16(s, r);
+ uint16x8_t sum_u16 = vdupq_n_u16(0);
+
+ int i = h_limit;
+ do {
+ uint16x8_t s0 = vld1q_u16(src16_ptr);
+ uint16x8_t r0 = vld1q_u16(ref16_ptr);
+ sum_u16 = vabaq_u16(sum_u16, s0, r0);
+
+ src16_ptr += src_stride;
+ ref16_ptr += ref_stride;
+ } while (--i != 0);
+
sum_u32 = vpadalq_u16(sum_u32, sum_u16);
- src16_ptr += src_stride;
- ref16_ptr += ref_stride;
- } while (--i != 0);
+ h -= h_limit;
+ } while (h != 0);
return horizontal_add_u32x4(sum_u32);
}
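
Concretely: for an 8x32 block, h_limit = 16, so the outer loop runs twice
and vpadalq_u16 executes only twice in total, rather than once per row as
in the removed highbd_sad8xh_large_neon; for h <= 16 the outer loop runs
exactly once with h_limit = h. The assert(h % h_limit == 0) holds because
every supported block height is a power of two, so any h above 16 is a
multiple of 16.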
-#endif // !CONFIG_REALTIME_ONLY
-static inline uint32_t highbd_sad16xh_large_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *ref_ptr,
- int ref_stride, int h) {
+static inline uint32_t highbd_sadwxh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int w, int h,
+ const int h_overflow) {
const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
- uint32x4_t sum[2] = { vdupq_n_u32(0), vdupq_n_u32(0) };
- int i = h;
- do {
- uint16x8_t s0 = vld1q_u16(src16_ptr);
- uint16x8_t r0 = vld1q_u16(ref16_ptr);
- uint16x8_t diff0 = vabdq_u16(s0, r0);
- sum[0] = vpadalq_u16(sum[0], diff0);
-
- uint16x8_t s1 = vld1q_u16(src16_ptr + 8);
- uint16x8_t r1 = vld1q_u16(ref16_ptr + 8);
- uint16x8_t diff1 = vabdq_u16(s1, r1);
- sum[1] = vpadalq_u16(sum[1], diff1);
-
- src16_ptr += src_stride;
- ref16_ptr += ref_stride;
- } while (--i != 0);
+ const int h_limit = h < h_overflow ? h : h_overflow;
+ assert(h % h_limit == 0);
- sum[0] = vaddq_u32(sum[0], sum[1]);
- return horizontal_add_u32x4(sum[0]);
-}
-
-static inline uint32_t highbd_sadwxh_large_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *ref_ptr,
- int ref_stride, int w, int h) {
- const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
- const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
- uint32x4_t sum[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0),
- vdupq_n_u32(0) };
+ uint32x4_t sum_u32 = vdupq_n_u32(0);
- int i = h;
do {
- int j = 0;
+ uint16x8_t sum_u16[2] = { vdupq_n_u16(0), vdupq_n_u16(0) };
+
+ int i = h_limit;
do {
- uint16x8_t s0 = vld1q_u16(src16_ptr + j);
- uint16x8_t r0 = vld1q_u16(ref16_ptr + j);
- uint16x8_t diff0 = vabdq_u16(s0, r0);
- sum[0] = vpadalq_u16(sum[0], diff0);
+ int j = 0;
+ do {
+ uint16x8_t s0 = vld1q_u16(src16_ptr + j);
+ uint16x8_t r0 = vld1q_u16(ref16_ptr + j);
+ sum_u16[0] = vabaq_u16(sum_u16[0], s0, r0);
- uint16x8_t s1 = vld1q_u16(src16_ptr + j + 8);
- uint16x8_t r1 = vld1q_u16(ref16_ptr + j + 8);
- uint16x8_t diff1 = vabdq_u16(s1, r1);
- sum[1] = vpadalq_u16(sum[1], diff1);
+ uint16x8_t s1 = vld1q_u16(src16_ptr + j + 8);
+ uint16x8_t r1 = vld1q_u16(ref16_ptr + j + 8);
+ sum_u16[1] = vabaq_u16(sum_u16[1], s1, r1);
- uint16x8_t s2 = vld1q_u16(src16_ptr + j + 16);
- uint16x8_t r2 = vld1q_u16(ref16_ptr + j + 16);
- uint16x8_t diff2 = vabdq_u16(s2, r2);
- sum[2] = vpadalq_u16(sum[2], diff2);
+ j += 16;
+ } while (j < w);
- uint16x8_t s3 = vld1q_u16(src16_ptr + j + 24);
- uint16x8_t r3 = vld1q_u16(ref16_ptr + j + 24);
- uint16x8_t diff3 = vabdq_u16(s3, r3);
- sum[3] = vpadalq_u16(sum[3], diff3);
+ src16_ptr += src_stride;
+ ref16_ptr += ref_stride;
+ } while (--i != 0);
- j += 32;
- } while (j < w);
+ sum_u32 = vpadalq_u16(sum_u32, sum_u16[0]);
+ sum_u32 = vpadalq_u16(sum_u32, sum_u16[1]);
- src16_ptr += src_stride;
- ref16_ptr += ref_stride;
- } while (--i != 0);
-
- sum[0] = vaddq_u32(sum[0], sum[1]);
- sum[2] = vaddq_u32(sum[2], sum[3]);
- sum[0] = vaddq_u32(sum[0], sum[2]);
-
- return horizontal_add_u32x4(sum[0]);
+ h -= h_limit;
+ } while (h != 0);
+ return horizontal_add_u32x4(sum_u32);
}
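
Note that the inner j-loop advances by 16 columns, so each 16-bit
accumulator lane absorbs w / 16 absolute differences per row; the roughly
16-add budget per lane is shared across those column steps, which is what
the caller-supplied h_overflow accounts for.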
-static inline unsigned int highbd_sad128xh_large_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *ref_ptr,
- int ref_stride, int h) {
- return highbd_sadwxh_large_neon(src_ptr, src_stride, ref_ptr, ref_stride, 128,
- h);
+static inline uint32_t highbd_sad16xh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h) {
+ // 'h_overflow' is the number of 16-wide rows we can process before 16-bit
+ // accumulators overflow. After hitting this limit, accumulate into 32-bit
+ // elements. 65535 / 4095 ~= 16, so 16 16-wide rows using two accumulators.
+ const int h_overflow = 16;
+ return highbd_sadwxh_neon(src_ptr, src_stride, ref_ptr, ref_stride, 16, h,
+ h_overflow);
}
-static inline unsigned int highbd_sad64xh_large_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *ref_ptr,
- int ref_stride, int h) {
- return highbd_sadwxh_large_neon(src_ptr, src_stride, ref_ptr, ref_stride, 64,
- h);
+static inline uint32_t highbd_sad32xh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h) {
+ // 'h_overflow' is the number of 32-wide rows we can process before 16-bit
+ // accumulators overflow. After hitting this limit, accumulate into 32-bit
+ // elements. 65535 / 4095 ~= 16, so 8 32-wide rows using two accumulators.
+ const int h_overflow = 8;
+ return highbd_sadwxh_neon(src_ptr, src_stride, ref_ptr, ref_stride, 32, h,
+ h_overflow);
}
-static inline unsigned int highbd_sad32xh_large_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *ref_ptr,
- int ref_stride, int h) {
- return highbd_sadwxh_large_neon(src_ptr, src_stride, ref_ptr, ref_stride, 32,
- h);
+static inline uint32_t highbd_sad64xh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h) {
+ // 'h_overflow' is the number of 64-wide rows we can process before 16-bit
+ // accumulators overflow. After hitting this limit, accumulate into 32-bit
+ // elements. 65535 / 4095 ~= 16, so 4 64-wide rows using two accumulators.
+ const int h_overflow = 4;
+ return highbd_sadwxh_neon(src_ptr, src_stride, ref_ptr, ref_stride, 64, h,
+ h_overflow);
}
-#define HBD_SAD_WXH_SMALL_NEON(w, h) \
- unsigned int aom_highbd_sad##w##x##h##_neon( \
- const uint8_t *src, int src_stride, const uint8_t *ref, \
- int ref_stride) { \
- return highbd_sad##w##xh_small_neon(src, src_stride, ref, ref_stride, \
- (h)); \
- }
+static inline uint32_t highbd_sad128xh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h) {
+ // 'h_overflow' is the number of 128-wide rows we can process before 16-bit
+ // accumulators overflow. After hitting this limit, accumulate into 32-bit
+ // elements. 65535 / 4095 ~= 16, so 2 128-wide rows using two accumulators.
+ const int h_overflow = 2;
+ return highbd_sadwxh_neon(src_ptr, src_stride, ref_ptr, ref_stride, 128, h,
+ h_overflow);
+}
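
Taken together, the wrapper constants follow h_overflow = 256 / w for
w >= 16 (16 -> 16, 32 -> 8, 64 -> 4, 128 -> 2): each row costs w / 16 adds
per 16-bit lane, and (w / 16) * h_overflow = 16 keeps every lane within its
65535 budget at 4095 per add.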
-#define HBD_SAD_WXH_LARGE_NEON(w, h) \
+#define HBD_SAD_WXH_NEON(w, h) \
unsigned int aom_highbd_sad##w##x##h##_neon( \
const uint8_t *src, int src_stride, const uint8_t *ref, \
int ref_stride) { \
- return highbd_sad##w##xh_large_neon(src, src_stride, ref, ref_stride, \
- (h)); \
+ return highbd_sad##w##xh_neon(src, src_stride, ref, ref_stride, (h)); \
}
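
For illustration, HBD_SAD_WXH_NEON(16, 16) expands to:

unsigned int aom_highbd_sad16x16_neon(const uint8_t *src, int src_stride,
                                      const uint8_t *ref, int ref_stride) {
  return highbd_sad16xh_neon(src, src_stride, ref, ref_stride, (16));
}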
-HBD_SAD_WXH_SMALL_NEON(4, 4)
-HBD_SAD_WXH_SMALL_NEON(4, 8)
+HBD_SAD_WXH_NEON(4, 4)
+HBD_SAD_WXH_NEON(4, 8)
-HBD_SAD_WXH_SMALL_NEON(8, 4)
-HBD_SAD_WXH_SMALL_NEON(8, 8)
-HBD_SAD_WXH_SMALL_NEON(8, 16)
+HBD_SAD_WXH_NEON(8, 4)
+HBD_SAD_WXH_NEON(8, 8)
+HBD_SAD_WXH_NEON(8, 16)
-HBD_SAD_WXH_LARGE_NEON(16, 8)
-HBD_SAD_WXH_LARGE_NEON(16, 16)
-HBD_SAD_WXH_LARGE_NEON(16, 32)
+HBD_SAD_WXH_NEON(16, 8)
+HBD_SAD_WXH_NEON(16, 16)
+HBD_SAD_WXH_NEON(16, 32)
-HBD_SAD_WXH_LARGE_NEON(32, 16)
-HBD_SAD_WXH_LARGE_NEON(32, 32)
-HBD_SAD_WXH_LARGE_NEON(32, 64)
+HBD_SAD_WXH_NEON(32, 16)
+HBD_SAD_WXH_NEON(32, 32)
+HBD_SAD_WXH_NEON(32, 64)
-HBD_SAD_WXH_LARGE_NEON(64, 32)
-HBD_SAD_WXH_LARGE_NEON(64, 64)
-HBD_SAD_WXH_LARGE_NEON(64, 128)
+HBD_SAD_WXH_NEON(64, 32)
+HBD_SAD_WXH_NEON(64, 64)
+HBD_SAD_WXH_NEON(64, 128)
-HBD_SAD_WXH_LARGE_NEON(128, 64)
-HBD_SAD_WXH_LARGE_NEON(128, 128)
+HBD_SAD_WXH_NEON(128, 64)
+HBD_SAD_WXH_NEON(128, 128)
#if !CONFIG_REALTIME_ONLY
-HBD_SAD_WXH_SMALL_NEON(4, 16)
+HBD_SAD_WXH_NEON(4, 16)
-HBD_SAD_WXH_LARGE_NEON(8, 32)
+HBD_SAD_WXH_NEON(8, 32)
-HBD_SAD_WXH_LARGE_NEON(16, 4)
-HBD_SAD_WXH_LARGE_NEON(16, 64)
+HBD_SAD_WXH_NEON(16, 4)
+HBD_SAD_WXH_NEON(16, 64)
-HBD_SAD_WXH_LARGE_NEON(32, 8)
+HBD_SAD_WXH_NEON(32, 8)
-HBD_SAD_WXH_LARGE_NEON(64, 16)
+HBD_SAD_WXH_NEON(64, 16)
#endif // !CONFIG_REALTIME_ONLY
-#define HBD_SAD_SKIP_WXH_SMALL_NEON(w, h) \
- unsigned int aom_highbd_sad_skip_##w##x##h##_neon( \
- const uint8_t *src, int src_stride, const uint8_t *ref, \
- int ref_stride) { \
- return 2 * highbd_sad##w##xh_small_neon(src, 2 * src_stride, ref, \
- 2 * ref_stride, (h) / 2); \
- }
+#undef HBD_SAD_WXH_NEON
-#define HBD_SAD_SKIP_WXH_LARGE_NEON(w, h) \
- unsigned int aom_highbd_sad_skip_##w##x##h##_neon( \
- const uint8_t *src, int src_stride, const uint8_t *ref, \
- int ref_stride) { \
- return 2 * highbd_sad##w##xh_large_neon(src, 2 * src_stride, ref, \
- 2 * ref_stride, (h) / 2); \
+#define HBD_SAD_SKIP_WXH_NEON(w, h) \
+ unsigned int aom_highbd_sad_skip_##w##x##h##_neon( \
+ const uint8_t *src, int src_stride, const uint8_t *ref, \
+ int ref_stride) { \
+ return 2 * highbd_sad##w##xh_neon(src, 2 * src_stride, ref, \
+ 2 * ref_stride, (h) / 2); \
}
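
The skip variants estimate SAD from every other row: the stride is doubled,
the height halved, and the result scaled by 2 to stay comparable to a full
SAD. For illustration, HBD_SAD_SKIP_WXH_NEON(16, 16) expands to:

unsigned int aom_highbd_sad_skip_16x16_neon(
    const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride) {
  return 2 * highbd_sad16xh_neon(src, 2 * src_stride, ref, 2 * ref_stride,
                                 (16) / 2);
}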
-HBD_SAD_SKIP_WXH_SMALL_NEON(8, 16)
+HBD_SAD_SKIP_WXH_NEON(8, 16)
-HBD_SAD_SKIP_WXH_LARGE_NEON(16, 16)
-HBD_SAD_SKIP_WXH_LARGE_NEON(16, 32)
+HBD_SAD_SKIP_WXH_NEON(16, 16)
+HBD_SAD_SKIP_WXH_NEON(16, 32)
-HBD_SAD_SKIP_WXH_LARGE_NEON(32, 16)
-HBD_SAD_SKIP_WXH_LARGE_NEON(32, 32)
-HBD_SAD_SKIP_WXH_LARGE_NEON(32, 64)
+HBD_SAD_SKIP_WXH_NEON(32, 16)
+HBD_SAD_SKIP_WXH_NEON(32, 32)
+HBD_SAD_SKIP_WXH_NEON(32, 64)
-HBD_SAD_SKIP_WXH_LARGE_NEON(64, 32)
-HBD_SAD_SKIP_WXH_LARGE_NEON(64, 64)
-HBD_SAD_SKIP_WXH_LARGE_NEON(64, 128)
+HBD_SAD_SKIP_WXH_NEON(64, 32)
+HBD_SAD_SKIP_WXH_NEON(64, 64)
+HBD_SAD_SKIP_WXH_NEON(64, 128)
-HBD_SAD_SKIP_WXH_LARGE_NEON(128, 64)
-HBD_SAD_SKIP_WXH_LARGE_NEON(128, 128)
+HBD_SAD_SKIP_WXH_NEON(128, 64)
+HBD_SAD_SKIP_WXH_NEON(128, 128)
#if !CONFIG_REALTIME_ONLY
-HBD_SAD_SKIP_WXH_SMALL_NEON(4, 16)
+HBD_SAD_SKIP_WXH_NEON(4, 16)
-HBD_SAD_SKIP_WXH_SMALL_NEON(8, 32)
+HBD_SAD_SKIP_WXH_NEON(8, 32)
-HBD_SAD_SKIP_WXH_LARGE_NEON(16, 64)
+HBD_SAD_SKIP_WXH_NEON(16, 64)
-HBD_SAD_SKIP_WXH_LARGE_NEON(64, 16)
+HBD_SAD_SKIP_WXH_NEON(64, 16)
#endif // !CONFIG_REALTIME_ONLY
+#undef HBD_SAD_SKIP_WXH_NEON
+
static inline uint32_t highbd_sad8xh_avg_neon(const uint8_t *src_ptr,
int src_stride,
const uint8_t *ref_ptr,