Commit 8deb765d39 for aom

commit 8deb765d39d4710f085dd276008c541a3f7d71f9
Author: Gerda Zsejke More <gerdazsejke.more@arm.com>
Date:   Mon Oct 20 09:03:39 2025 +0200

    Optimize SVE implementation of av1_warp_affine

    When beta == 0 and alpha == 0 the filter values are known before the
    processing loops, so add new logic for this case using Neon USMMLA
    instructions, keeping the SVE implementation for the remaining cases.

    By permuting the input samples and the 6-tap filter we can use the
    Armv8.6 I8MM USMMLA matrix multiply instruction to accelerate
    horizontal 6-tap convolutions. Each 2x8 by 8x2 matrix multiply does
    twice the work of a USDOT dot product instruction.
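
    As a rough illustration, the 6-tap case boils down to the following
    sketch (condensed from the code added below; the helper name is
    illustrative only, and <arm_neon.h> plus the kMatMul6PermuteTbl
    table from this patch are assumed):

      // Four adjacent outputs of a 6-tap horizontal filter from one
      // USMMLA. 'x_filter' is the 8-tap filter narrowed to int8 with
      // taps 6 and 7 equal to zero.
      static inline int32x4_t hfilter_4px_6tap_sketch(uint8x16_t in,
                                                       int8x8_t x_filter,
                                                       int32x4_t add_const) {
        // Sample rows { s0..s7 } and { s2..s9 } form the 2x8 matrix.
        const uint8x16_t samples =
            vqtbl1q_u8(in, vld1q_u8(kMatMul6PermuteTbl));
        // Filter rows { f0..f5, 0, 0 } and { 0, f0..f5, 0 } form the
        // 8x2 matrix operand.
        const int8x16_t filter =
            vcombine_s8(x_filter, vext_s8(x_filter, x_filter, 7));
        // Lanes 0..3 of the result hold the convolutions at offsets
        // 0..3, i.e. four dot products per instruction.
        return vusmmlaq_s32(add_const, samples, filter);
      }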

    For the 8-tap filter we can replace the USDOT instructions with a
    USMMLA applying a 7-tap filter, plus an extra multiplication for the
    remaining tap.
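
    Again only a sketch (helper name illustrative; kMatMul8PermuteTbl
    and kTblIdx0_3 are the tables added by this patch):

      // 8-tap handled as a 7-tap USMMLA over taps 1..7 plus a separate
      // multiply-accumulate for tap 0.
      static inline int32x4_t hfilter_4px_8tap_sketch(uint8x16_t in,
                                                       int8x8_t x_filter,
                                                       int32_t f0,
                                                       int32x4_t add_const) {
        // Sample rows { s1..s8 } and { s3..s10 }.
        const uint8x16_t samples =
            vqtbl1q_u8(in, vld1q_u8(kMatMul8PermuteTbl));
        // Filter rows { f1..f7, 0 } and { 0, f1..f7 }.
        const int8x16_t filter = vcombine_s8(
            vext_s8(x_filter, vdup_n_s8(0), 1), vset_lane_s8(0, x_filter, 0));
        int32x4_t sum = vusmmlaq_s32(add_const, samples, filter);
        // kTblIdx0_3 scatters s0..s3 into int32 lanes (out-of-range TBL
        // indices read as zero), so tap 0 is applied with a plain MLA.
        const int32x4_t s0_3 =
            vreinterpretq_s32_u8(vqtbl1q_u8(in, vld1q_u8(kTblIdx0_3)));
        return vmlaq_s32(sum, s0_3, vdupq_n_s32(f0));
      }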

    Change-Id: Ia5df8a05512525f1eb4add4da4423c67aff2ca74

diff --git a/av1/common/arm/warp_plane_sve.c b/av1/common/arm/warp_plane_sve.c
index 10aee35b1a..455e29d124 100644
--- a/av1/common/arm/warp_plane_sve.c
+++ b/av1/common/arm/warp_plane_sve.c
@@ -20,6 +20,24 @@ DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
   8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
 };

+DECLARE_ALIGNED(16, static const uint8_t, kMatMul6PermuteTbl[32]) = {
+  // clang-format off
+  0,  1,  2,  3,  4,  5,  6,  7,  2,  3,  4,  5,  6,  7,  8,  9,
+  4,  5,  6,  7,  8,  9, 10, 11,  6,  7,  8,  9, 10, 11, 12, 13
+  // clang-format on
+};
+
+DECLARE_ALIGNED(16, static const uint8_t, kMatMul8PermuteTbl[32]) = {
+  // clang-format off
+  1,  2,  3,  4,  5,  6,  7,  8,  3,  4,  5,  6,  7,  8,  9, 10,
+  5,  6,  7,  8,  9, 10, 11, 12,  7,  8,  9, 10, 11, 12, 13, 14
+  // clang-format on
+};
+
+DECLARE_ALIGNED(16, static const uint8_t, kTblIdx0_3[16]) = {
+  0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1,
+};
+
 static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
                                                            int sx, int alpha) {
   // Only put the constant in every other lane to avoid double-counting when
@@ -87,6 +105,47 @@ static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
   return vreinterpretq_s16_u16(res);
 }

+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1_6tap_beta0(
+    const uint8x16_t in, const int8x16_t filter, const uint8x16_t perm_tbl) {
+  const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
+
+  // Permute samples ready for matrix multiply.
+  // { 0,  1,  2,  3,  4,  5,  6,  7,  2,  3,  4,  5,  6,  7,  8,  9 }
+  const uint8x16_t perm_samples = vqtbl1q_u8(in, perm_tbl);
+
+  // This instruction multiplies a 2x8 matrix (samples) by an 8x2 matrix
+  // (filter), destructively accumulating into the destination register.
+  int32x4_t sum = vusmmlaq_s32(add_const, perm_samples, filter);
+
+  uint16x8_t res =
+      vcombine_u16(vqrshrun_n_s32(sum, ROUND0_BITS), vdup_n_u16(0));
+
+  return vreinterpretq_s16_u16(res);
+}
+
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1_8tap_beta0(
+    const uint8x16_t in, const int8x16_t filter, const int32x4_t f0,
+    const uint8x16_t perm_tbl, const uint8x16_t tbl_idx0_3) {
+  const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
+
+  // Permute samples ready for matrix multiply.
+  // { 1,  2,  3,  4,  5,  6,  7,  8,  3,  4,  5,  6,  7,  8,  9, 10 }
+  const uint8x16_t perm_samples = vqtbl1q_u8(in, perm_tbl);
+  // Get samples 0..3 to apply tap 0 after matrix multiply.
+  const int32x4_t samples_0_3 =
+      vreinterpretq_s32_u8(vqtbl1q_u8(in, tbl_idx0_3));
+
+  // Calculate partial 7-tap convolution.
+  int32x4_t sum = vusmmlaq_s32(add_const, perm_samples, filter);
+  // Apply tap 0 and accumulate.
+  sum = vmlaq_s32(sum, samples_0_3, f0);
+
+  uint16x8_t res =
+      vcombine_u16(vqrshrun_n_s32(sum, ROUND0_BITS), vdup_n_u16(0));
+
+  return vreinterpretq_s16_u16(res);
+}
+
 static AOM_FORCE_INLINE int16x8_t
 horizontal_filter_4x1_f1_beta0(const uint8x16_t in, int16x8_t f_s16) {
   const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
@@ -116,6 +175,53 @@ static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
   return horizontal_filter_4x1_f1_beta0(in, f_s16);
 }

+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1_6tap_beta0(
+    const uint8x16_t in, const int8x16_t filter, const uint8x16x2_t perm_tbl) {
+  const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
+
+  // Permute samples ready for matrix multiply.
+  // { 0,  1,  2,  3,  4,  5,  6,  7,  2,  3,  4,  5,  6,  7,  8,  9 }
+  // { 4,  5,  6,  7,  8,  9, 10, 11,  6,  7,  8,  9, 10, 11, 12, 13 }
+  uint8x16_t perm_samples[2] = { vqtbl1q_u8(in, perm_tbl.val[0]),
+                                 vqtbl1q_u8(in, perm_tbl.val[1]) };
+
+  // These instructions multiply a 2x8 matrix (samples) by an 8x2 matrix
+  // (filter), destructively accumulating into the destination register.
+  int32x4_t sum0123 = vusmmlaq_s32(add_const, perm_samples[0], filter);
+  int32x4_t sum4567 = vusmmlaq_s32(add_const, perm_samples[1], filter);
+
+  uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS),
+                                vqrshrun_n_s32(sum4567, ROUND0_BITS));
+
+  return vreinterpretq_s16_u16(res);
+}
+
+static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1_8tap_beta0(
+    const uint8x16_t in, const int8x16_t filter, const int16x4_t f0,
+    const uint8x16x2_t perm_tbl) {
+  const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
+
+  // Permute samples ready for matrix multiply.
+  // { 1,  2,  3,  4,  5,  6,  7,  8,  3,  4,  5,  6,  7,  8,  9, 10 }
+  // { 5,  6,  7,  8,  9, 10, 11, 12,  7,  8,  9, 10, 11, 12, 13, 14 }
+  uint8x16_t perm_samples[2] = { vqtbl1q_u8(in, perm_tbl.val[0]),
+                                 vqtbl1q_u8(in, perm_tbl.val[1]) };
+  // Get samples 0..7 to apply tap 0 after matrix multiply.
+  int16x8_t samples_0_7 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(in)));
+
+  // Calculate partial 7-tap convolution.
+  int32x4_t sum0123 = vusmmlaq_s32(add_const, perm_samples[0], filter);
+  int32x4_t sum4567 = vusmmlaq_s32(add_const, perm_samples[1], filter);
+  // Apply tap 0 and accumulate.
+  sum0123 = vmlal_s16(sum0123, vget_low_s16(samples_0_7), f0);
+  sum4567 = vmlal_s16(sum4567, vget_high_s16(samples_0_7), f0);
+
+  uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, ROUND0_BITS),
+                                vqrshrun_n_s32(sum4567, ROUND0_BITS));
+
+  return vreinterpretq_s16_u16(res);
+}
+
 static AOM_FORCE_INLINE int16x8_t
 horizontal_filter_8x1_f1_beta0(const uint8x16_t in, int16x8_t f_s16) {
   const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));
@@ -271,13 +377,209 @@ static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
   *res_high = vcombine_s32(vmovn_s64(m45), vmovn_s64(m67));
 }

+static AOM_FORCE_INLINE void warp_affine_horizontal_sve(
+    const uint8_t *ref, int width, int height, int stride, int p_width,
+    int p_height, int16_t alpha, int16_t beta, const int64_t x4,
+    const int64_t y4, const int i, int16x8_t tmp[]) {
+  const int bd = 8;
+  const int reduce_bits_horiz = ROUND0_BITS;
+  const int height_limit = AOMMIN(8, p_height - i) + 7;
+
+  int32_t ix4 = (int32_t)(x4 >> WARPEDMODEL_PREC_BITS);
+  int32_t iy4 = (int32_t)(y4 >> WARPEDMODEL_PREC_BITS);
+
+  int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
+  sx4 += alpha * (-4) + beta * (-4) + (1 << (WARPEDDIFF_PREC_BITS - 1)) +
+         (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
+  sx4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1);
+
+  if (ix4 <= -7) {
+    for (int k = 0; k < height_limit; ++k) {
+      int iy = clamp_iy(iy4 + k - 7, height);
+      int16_t dup_val =
+          (1 << (bd + FILTER_BITS - reduce_bits_horiz - 1)) +
+          ref[iy * stride] * (1 << (FILTER_BITS - reduce_bits_horiz));
+      tmp[k] = vdupq_n_s16(dup_val);
+    }
+    return;
+  } else if (ix4 >= width + 6) {
+    for (int k = 0; k < height_limit; ++k) {
+      int iy = clamp_iy(iy4 + k - 7, height);
+      int16_t dup_val = (1 << (bd + FILTER_BITS - reduce_bits_horiz - 1)) +
+                        ref[iy * stride + (width - 1)] *
+                            (1 << (FILTER_BITS - reduce_bits_horiz));
+      tmp[k] = vdupq_n_s16(dup_val);
+    }
+    return;
+  }
+
+  static const uint8_t kIotaArr[] = { 0, 1, 2,  3,  4,  5,  6,  7,
+                                      8, 9, 10, 11, 12, 13, 14, 15 };
+  const uint8x16_t indx = vld1q_u8(kIotaArr);
+
+  const int out_of_boundary_left = -(ix4 - 6);
+  const int out_of_boundary_right = (ix4 + 8) - width;
+
+  if (p_width == 4) {
+    if (beta == 0) {
+      if (alpha == 0) {
+        int16_t *f_ptr =
+            (int16_t *)(av1_warped_filter + (sx4 >> WARPEDDIFF_PREC_BITS));
+        int16x8_t f_s16 = vld1q_s16(f_ptr);
+        const int8x8_t x_filter = vmovn_s16(f_s16);
+        if ((f_ptr[0] | f_ptr[1]) == 0) {
+          uint8x16_t perm_tbl = vld1q_u8(kMatMul6PermuteTbl);
+          // Offset the permutation table to match filter layout.
+          perm_tbl = vaddq_u8(perm_tbl, vdupq_n_u8(2));
+          // Stagger filter for use with the matrix multiply instructions.
+          // { f2, f3, f4, f5, f6, f7, 0, 0, 0, f2, f3, f4, f5, f6, f7, 0 }
+          const int8x16_t filter = vcombine_s8(vext_s8(x_filter, x_filter, 2),
+                                               vext_s8(x_filter, x_filter, 1));
+          APPLY_HORIZONTAL_SHIFT(horizontal_filter_4x1_f1_6tap_beta0, filter,
+                                 perm_tbl);
+        } else if ((f_ptr[0] | f_ptr[7]) == 0) {
+          uint8x16_t perm_tbl = vld1q_u8(kMatMul6PermuteTbl);
+          // Offset the permutation table to match filter layout.
+          perm_tbl = vaddq_u8(perm_tbl, vdupq_n_u8(1));
+          // Stagger filter for use with the matrix multiply instructions.
+          // { f1, f2, f3, f4, f5, f6, 0, 0, 0, f1, f2, f3, f4, f5, f6, 0 }
+          const int8x16_t filter =
+              vcombine_s8(vext_s8(x_filter, x_filter, 1), x_filter);
+          APPLY_HORIZONTAL_SHIFT(horizontal_filter_4x1_f1_6tap_beta0, filter,
+                                 perm_tbl);
+        } else if ((f_ptr[6] | f_ptr[7]) == 0) {
+          const uint8x16_t perm_tbl = vld1q_u8(kMatMul6PermuteTbl);
+          // Stagger filter for use with the matrix multiply instructions.
+          // { f0, f1, f2, f3, f4, f5, 0, 0, 0, f0, f1, f2, f3, f4, f5, 0 }
+          const int8x16_t filter =
+              vcombine_s8(x_filter, vext_s8(x_filter, x_filter, 7));
+          APPLY_HORIZONTAL_SHIFT(horizontal_filter_4x1_f1_6tap_beta0, filter,
+                                 perm_tbl);
+        } else {
+          const uint8x16_t perm_tbl = vld1q_u8(kMatMul8PermuteTbl);
+          const uint8x16_t tbl_idx0_3 = vld1q_u8(kTblIdx0_3);
+
+          // Stagger filter for use with the matrix multiply
+          // instructions.
+          // { f1, f2, f3, f4, f5, f6, f7, 0, 0, f1, f2, f3, f4, f5, f6, f7 }
+          const int8x16_t filter = vcombine_s8(
+              vext_s8(x_filter, vdup_n_s8(0), 1), vset_lane_s8(0, x_filter, 0));
+          const int32x4_t f0 = vdupq_n_s32(f_ptr[0]);
+
+          APPLY_HORIZONTAL_SHIFT(horizontal_filter_4x1_f1_8tap_beta0, filter,
+                                 f0, perm_tbl, tbl_idx0_3);
+        }
+      } else {
+        APPLY_HORIZONTAL_SHIFT(horizontal_filter_4x1_f4, sx4, alpha);
+      }
+    } else {
+      if (alpha == 0) {
+        APPLY_HORIZONTAL_SHIFT(horizontal_filter_4x1_f1,
+                               (sx4 + beta * (k - 3)));
+      } else {
+        APPLY_HORIZONTAL_SHIFT(horizontal_filter_4x1_f4, (sx4 + beta * (k - 3)),
+                               alpha);
+      }
+    }
+  } else {
+    if (beta == 0) {
+      if (alpha == 0) {
+        int16_t *f_ptr =
+            (int16_t *)(av1_warped_filter + (sx4 >> WARPEDDIFF_PREC_BITS));
+        int16x8_t f_s16 = vld1q_s16(f_ptr);
+        const int8x8_t x_filter = vmovn_s16(f_s16);
+        if ((f_ptr[0] | f_ptr[1]) == 0) {
+          uint8x16x2_t perm_tbl = vld1q_u8_x2(kMatMul6PermuteTbl);
+          // Offset the permutation table to match filter layout.
+          perm_tbl.val[0] = vaddq_u8(perm_tbl.val[0], vdupq_n_u8(2));
+          perm_tbl.val[1] = vaddq_u8(perm_tbl.val[1], vdupq_n_u8(2));
+          // Stagger filter for use with the matrix multiply instructions.
+          // { f2, f3, f4, f5, f6, f7, 0, 0, 0, f2, f3, f4, f5, f6, f7, 0 }
+          const int8x16_t filter = vcombine_s8(vext_s8(x_filter, x_filter, 2),
+                                               vext_s8(x_filter, x_filter, 1));
+          APPLY_HORIZONTAL_SHIFT(horizontal_filter_8x1_f1_6tap_beta0, filter,
+                                 perm_tbl);
+        } else if ((f_ptr[0] | f_ptr[7]) == 0) {
+          uint8x16x2_t perm_tbl = vld1q_u8_x2(kMatMul6PermuteTbl);
+          // Offset the permutation table to match filter layout.
+          perm_tbl.val[0] = vaddq_u8(perm_tbl.val[0], vdupq_n_u8(1));
+          perm_tbl.val[1] = vaddq_u8(perm_tbl.val[1], vdupq_n_u8(1));
+          // Stagger filter for use with the matrix multiply instructions.
+          // { f1, f2, f3, f4, f5, f6, 0, 0, 0, f1, f2, f3, f4, f5, f6, 0 }
+          const int8x16_t filter =
+              vcombine_s8(vext_s8(x_filter, x_filter, 1), x_filter);
+          APPLY_HORIZONTAL_SHIFT(horizontal_filter_8x1_f1_6tap_beta0, filter,
+                                 perm_tbl);
+        } else if ((f_ptr[6] | f_ptr[7]) == 0) {
+          uint8x16x2_t perm_tbl = vld1q_u8_x2(kMatMul6PermuteTbl);
+          // Stagger filter for use with the matrix multiply instructions.
+          // { f0, f1, f2, f3, f4, f5, 0, 0, 0, f0, f1, f2, f3, f4, f5, 0 }
+          const int8x16_t filter =
+              vcombine_s8(x_filter, vext_s8(x_filter, x_filter, 7));
+          APPLY_HORIZONTAL_SHIFT(horizontal_filter_8x1_f1_6tap_beta0, filter,
+                                 perm_tbl);
+        } else {
+          uint8x16x2_t perm_tbl = vld1q_u8_x2(kMatMul8PermuteTbl);
+          // Stagger filter for use with the matrix multiply instructions.
+          // { f1, f2, f3, f4, f5, f6, f7, 0, 0, f1, f2, f3, f4, f5, f6, f7 }
+          const int8x16_t filter = vcombine_s8(
+              vext_s8(x_filter, vdup_n_s8(0), 1), vset_lane_s8(0, x_filter, 0));
+
+          const int16x4_t f0 = vdup_n_s16(f_ptr[0]);
+
+          APPLY_HORIZONTAL_SHIFT(horizontal_filter_8x1_f1_8tap_beta0, filter,
+                                 f0, perm_tbl);
+        }
+      } else {
+        APPLY_HORIZONTAL_SHIFT(horizontal_filter_8x1_f8, sx4, alpha);
+      }
+    } else {
+      if (alpha == 0) {
+        APPLY_HORIZONTAL_SHIFT(horizontal_filter_8x1_f1,
+                               (sx4 + beta * (k - 3)));
+      } else {
+        APPLY_HORIZONTAL_SHIFT(horizontal_filter_8x1_f8, (sx4 + beta * (k - 3)),
+                               alpha);
+      }
+    }
+  }
+}
+
 void av1_warp_affine_sve(const int32_t *mat, const uint8_t *ref, int width,
                          int height, int stride, uint8_t *pred, int p_col,
                          int p_row, int p_width, int p_height, int p_stride,
                          int subsampling_x, int subsampling_y,
                          ConvolveParams *conv_params, int16_t alpha,
                          int16_t beta, int16_t gamma, int16_t delta) {
-  av1_warp_affine_common(mat, ref, width, height, stride, pred, p_col, p_row,
-                         p_width, p_height, p_stride, subsampling_x,
-                         subsampling_y, conv_params, alpha, beta, gamma, delta);
+  const int w0 = conv_params->fwd_offset;
+  const int w1 = conv_params->bck_offset;
+  const int is_compound = conv_params->is_compound;
+  uint16_t *const dst = conv_params->dst;
+  const int dst_stride = conv_params->dst_stride;
+  const int do_average = conv_params->do_average;
+  const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
+
+  assert(IMPLIES(is_compound, dst != NULL));
+  assert(IMPLIES(do_average, is_compound));
+
+  for (int i = 0; i < p_height; i += 8) {
+    for (int j = 0; j < p_width; j += 8) {
+      const int32_t src_x = (p_col + j + 4) << subsampling_x;
+      const int32_t src_y = (p_row + i + 4) << subsampling_y;
+      const int64_t dst_x =
+          (int64_t)mat[2] * src_x + (int64_t)mat[3] * src_y + (int64_t)mat[0];
+      const int64_t dst_y =
+          (int64_t)mat[4] * src_x + (int64_t)mat[5] * src_y + (int64_t)mat[1];
+
+      const int64_t x4 = dst_x >> subsampling_x;
+      const int64_t y4 = dst_y >> subsampling_y;
+
+      int16x8_t tmp[15];
+      warp_affine_horizontal_sve(ref, width, height, stride, p_width, p_height,
+                                 alpha, beta, x4, y4, i, tmp);
+      warp_affine_vertical(pred, p_width, p_height, p_stride, is_compound, dst,
+                           dst_stride, do_average, use_dist_wtd_comp_avg, gamma,
+                           delta, y4, i, j, tmp, w0, w1);
+    }
+  }
 }