Commit 153c72c670 for aom
commit 153c72c6705e6622c1d74679c30c20bcbdebad42
Author: Rohan Baid <rohan.baid@ittiam.com>
Date: Fri Mar 6 13:44:55 2026 +0530
Improve av1_convolve_x_sr_general_avx2()
This patch introduces the following optimizations:
- Added specialized width-wise handling for 2-tap and 4-tap filtering
- Improved the existing 6/8-tap filtering for w >= 16
The scaling w.r.t. the existing AVX2 implementation is provided below:
BlockSize   2tap   4tap   6tap   8tap
2x2         1      0.94   -      -
4x4         0.95   1      -      -
8x8         0.95   0.98   0.97   1
16x16       0.99   0.98   1.01   1
32x32       1      0.93   0.99   0.97
64x64       0.99   1.01   0.99   1
128x128     1      1      1      1
Change-Id: I7f90448107f8d906a4cafd2a2c00161512c8bf6b
diff --git a/aom_dsp/x86/convolve_avx2.h b/aom_dsp/x86/convolve_avx2.h
index 727156268a..76a6ca0c55 100644
--- a/aom_dsp/x86/convolve_avx2.h
+++ b/aom_dsp/x86/convolve_avx2.h
@@ -22,6 +22,10 @@
#include "av1/common/convolve.h"
#include "av1/common/filter.h"
+#define SECOND_32_BLK (32)
+#define THIRD_32_BLK (32 << 1)
+#define FOURTH_32_BLK (SECOND_32_BLK + THIRD_32_BLK)
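
These macros name the starting byte offsets of the second, third, and fourth
32-pixel blocks of a row (32, 64, and 96); the w == 64 and w == 128 loops
below walk each row in 32-pixel chunks with them. A minimal scalar sanity
check (an illustrative sketch):

    #include <assert.h>
    // The macros resolve to the starts of the 2nd/3rd/4th 32-pixel blocks.
    static void check_blk_offsets(void) {
      assert(SECOND_32_BLK == 32);
      assert(THIRD_32_BLK == 64);
      assert(FOURTH_32_BLK == 96);
    }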
+
// filters for 16
DECLARE_ALIGNED(32, static const uint8_t, filt_global_avx2[]) = {
0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 0, 1, 1,
@@ -57,6 +61,18 @@ DECLARE_ALIGNED(32, static const uint8_t,
filt2_global_sse2[16]) = { 2, 3, 3, 4, 4, 5, 5, 6,
10, 11, 11, 12, 12, 13, 13, 14 };
+DECLARE_ALIGNED(32, static const uint8_t,
+ filt3_global_sse2[16]) = { 0, 1, 1, 2, 8, 9, 9, 10,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+
+DECLARE_ALIGNED(32, static const uint8_t,
+ filt4_global_sse2[16]) = { 2, 3, 3, 4, 10, 11, 11, 12,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+
+DECLARE_ALIGNED(32, static const uint8_t,
+ filt5_global_sse2[16]) = { 0, 1, 1, 2, 4, 5, 5, 6,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
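
These masks drive _mm_shuffle_epi8 in the 2x2 helpers below: filt3/filt4
gather the tap-0/1 and tap-2/3 input pairs for the 4-tap 2x2 path (row 0 in
bytes 0..7, row 1 in bytes 8..15 of the loaded register), and filt5 does the
same for the 2-tap 2x2 path, where the two rows sit at byte offsets 0 and 4.
A scalar model of the shuffle (a sketch):

    #include <stdint.h>
    // Scalar model of _mm_shuffle_epi8: out[i] = in[mask[i] & 15], or 0 when
    // bit 7 of mask[i] is set. With filt3_global_sse2 this gathers the
    // (s0,s1)/(s1,s2) pairs feeding taps 0/1 of both output pixels per row;
    // filt4 gathers the pairs for taps 2/3.
    static void pshufb_model(const uint8_t in[16], const uint8_t mask[16],
                             uint8_t out[16]) {
      for (int i = 0; i < 16; i++)
        out[i] = (mask[i] & 0x80) ? 0 : in[mask[i] & 0x0F];
    }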
+
DECLARE_ALIGNED(32, static const uint8_t,
filt1_global_avx2[32]) = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5,
6, 6, 7, 7, 8, 0, 1, 1, 2, 2, 3,
@@ -1163,4 +1179,295 @@ static inline __m256i highbd_convolve_rounding(
return res_round;
}
+static inline __m256i round_sr_x_avx2(const __m256i data) {
+  // The two rounding steps
+  //   data = (data + 2) >> 2
+  //   data = (data + 8) >> 4
+  // can be folded into a single step:
+  //   data = (data + 34) >> 6
+ const __m256i value = _mm256_set1_epi16(34);
+ const __m256i reg = _mm256_add_epi16(data, value);
+ return _mm256_srai_epi16(reg, 6);
+}
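
For the x-only SR path conv_params->round_0 is 3 and FILTER_BITS is 7, so the
two shifts are 2 and 4 and the folded constant is 34 = 2 + (8 << 2). A scalar
equivalence check (a sketch, assuming those round values and an arithmetic
right shift, matching _mm256_srai_epi16):

    #include <assert.h>
    // Two-step rounding vs. the folded (x + 34) >> 6, exhaustively over the
    // 16-bit intermediate range.
    static void check_round_fold(void) {
      for (int x = -(1 << 15); x < (1 << 15); x++)
        assert(((((x + 2) >> 2) + 8) >> 4) == ((x + 34) >> 6));
    }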
+
+static inline __m128i convolve_x_4tap_4x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t src_stride,
+ __m128i *const coeffs) {
+ __m128i data[2];
+ const __m128i f_l0 = _mm_load_si128((__m128i const *)filt1_global_sse2);
+ const __m128i f_l1 = _mm_load_si128((__m128i const *)filt2_global_sse2);
+ const __m128i src_1 =
+ load_8bit_8x2_to_1_reg_sse2(src, (int)(sizeof(*src) * src_stride));
+
+ data[0] = _mm_shuffle_epi8(src_1, f_l0);
+ data[1] = _mm_shuffle_epi8(src_1, f_l1);
+ return convolve_lowbd_4tap_ssse3(data, coeffs);
+}
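
What one output pixel of this helper computes, once round_sr_x and the packus
saturation in its callers are applied, can be modeled in scalar form (a
sketch; f[] holds the four taps as the lowbd prepare_coeffs helpers produce
them, halved, hence the 6-bit final shift):

    #include <stdint.h>
    // One 4-tap horizontal output pixel with the folded (sum + 34) >> 6
    // rounding and the unsigned-8-bit saturation of _mm_packus_epi16.
    static uint8_t conv4_px(const uint8_t *s, const int8_t f[4]) {
      int sum = s[0] * f[0] + s[1] * f[1] + s[2] * f[2] + s[3] * f[3];
      sum = (sum + 34) >> 6;
      return (uint8_t)(sum < 0 ? 0 : sum > 255 ? 255 : sum);
    }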
+
+static inline __m128i round_sr_x_ssse3(const __m128i data) {
+ const __m128i val = _mm_set1_epi16(34);
+ const __m128i reg = _mm_add_epi16(data, val);
+ return _mm_srai_epi16(reg, 6);
+}
+
+static inline void store_x_u8_4x2_sse2(const __m128i reg, uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
+ xx_storel_32(dst, reg);
+ *(uint32_t *)(dst + dst_stride) =
+ ((uint32_t)_mm_extract_epi16(reg, 3) << 16) | _mm_extract_epi16(reg, 2);
+}
+
+static inline void pack_store_x_4x2_sse2(const __m128i reg, uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
+ const __m128i reg_pack = _mm_packus_epi16(reg, reg);
+ store_x_u8_4x2_sse2(reg_pack, dst, dst_stride);
+}
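
After _mm_packus_epi16, row 0's four pixels sit in bytes 0..3 and row 1's in
bytes 4..7; store_x_u8_4x2_sse2 writes the low dword directly and rebuilds
the second dword from 16-bit words 2 and 3, since SSE2 has no 32-bit extract.
A little-endian scalar model (a sketch):

    #include <stdint.h>
    #include <string.h>
    // Rebuild bytes 4..7 of the packed register from words 2 and 3, as
    // store_x_u8_4x2_sse2 does for the second row.
    static uint32_t second_row_dword(const uint8_t packed[8]) {
      uint16_t w2, w3;
      memcpy(&w2, packed + 4, 2);  // _mm_extract_epi16(reg, 2)
      memcpy(&w3, packed + 6, 2);  // _mm_extract_epi16(reg, 3)
      return ((uint32_t)w3 << 16) | w2;
    }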
+
+static inline __m128i convolve_x_4tap_2x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t src_stride,
+ __m128i *const coeffs) {
+ __m128i data[2];
+ const __m128i f_0 = _mm_load_si128((__m128i const *)filt3_global_sse2);
+ const __m128i f_1 = _mm_load_si128((__m128i const *)filt4_global_sse2);
+ const __m128i reg =
+ load_8bit_8x2_to_1_reg_sse2(src, (int)(sizeof(*src) * src_stride));
+
+ data[0] = _mm_shuffle_epi8(reg, f_0);
+ data[1] = _mm_shuffle_epi8(reg, f_1);
+ return convolve_lowbd_4tap_ssse3(data, coeffs);
+}
+
+static inline void pack_store_x_2x2_sse2(const __m128i reg, uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
+ const __m128i data = _mm_packus_epi16(reg, reg);
+ *(int16_t *)dst = (int16_t)_mm_cvtsi128_si32(data);
+ *(int16_t *)(dst + dst_stride) = (int16_t)_mm_extract_epi16(data, 1);
+}
+
+static inline __m128i convolve_x_2tap_ssse3(const __m128i *data,
+ const __m128i *coeff) {
+ return _mm_maddubs_epi16(data[0], coeff[0]);
+}
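
_mm_maddubs_epi16 multiplies the unsigned bytes of its first operand by the
corresponding signed bytes of the second and adds adjacent products into
signed, saturated 16-bit lanes, so a single instruction evaluates both taps
once the (s[x], s[x+1]) pairs are interleaved. A per-lane scalar model (a
sketch):

    #include <stdint.h>
    // One result lane of _mm_maddubs_epi16: unsigned source bytes times
    // signed taps, summed with signed 16-bit saturation.
    static int16_t maddubs_lane(uint8_t s0, uint8_t s1, int8_t f0, int8_t f1) {
      int32_t sum = (int32_t)s0 * f0 + (int32_t)s1 * f1;
      if (sum > INT16_MAX) sum = INT16_MAX;
      if (sum < INT16_MIN) sum = INT16_MIN;
      return (int16_t)sum;
    }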
+
+static inline __m128i load8_x_4x2_sse4(const void *const src,
+ const ptrdiff_t offset) {
+ const __m128i s = _mm_cvtsi32_si128(loadu_int32(src));
+ return _mm_insert_epi32(s, loadu_int32((uint8_t *)src + offset), 1);
+}
+
+static inline __m128i load_x_u8_4x2_sse4(const uint8_t *const src,
+ const ptrdiff_t stride) {
+ return load8_x_4x2_sse4(src, sizeof(*src) * stride);
+}
+
+static inline __m128i convolve_x_2tap_2x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i *coeffs) {
+ const __m128i flt = _mm_load_si128((__m128i const *)filt5_global_sse2);
+ const __m128i reg = load_x_u8_4x2_sse4(src, stride);
+ const __m128i data = _mm_shuffle_epi8(reg, flt);
+ return convolve_x_2tap_ssse3(&data, coeffs);
+}
+
+static inline __m128i convolve_x_2tap_4x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i *coeffs) {
+ const __m128i flt = _mm_load_si128((__m128i const *)filt1_global_sse2);
+ const __m128i data =
+ load_8bit_8x2_to_1_reg_sse2(src, (int)(sizeof(*src) * stride));
+ const __m128i res = _mm_shuffle_epi8(data, flt);
+ return convolve_x_2tap_ssse3(&res, coeffs);
+}
+
+static inline void convolve_x_2tap_8x2_ssse3(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m128i *coeffs,
+ __m128i *data) {
+ __m128i res[2];
+ const __m128i reg_00 = _mm_loadu_si128((__m128i *)src);
+ const __m128i reg_10 = _mm_loadu_si128((__m128i *)(src + stride));
+ const __m128i reg_01 = _mm_srli_si128(reg_00, 1);
+ const __m128i reg_11 = _mm_srli_si128(reg_10, 1);
+ res[0] = _mm_unpacklo_epi8(reg_00, reg_01);
+ res[1] = _mm_unpacklo_epi8(reg_10, reg_11);
+
+ data[0] = convolve_x_2tap_ssse3(&res[0], coeffs);
+ data[1] = convolve_x_2tap_ssse3(&res[1], coeffs);
+}
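
The one-byte _mm_srli_si128 plus _mm_unpacklo_epi8 is what builds those
pairs: it interleaves each source byte with its right neighbour. A scalar
sketch:

    #include <stdint.h>
    // Interleave s[x] with s[x + 1] across eight positions, forming the
    // (tap 0, tap 1) input pairs that _mm_maddubs_epi16 consumes.
    static void make_2tap_pairs(const uint8_t s[9], uint8_t pairs[16]) {
      for (int x = 0; x < 8; x++) {
        pairs[2 * x + 0] = s[x];
        pairs[2 * x + 1] = s[x + 1];
      }
    }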
+
+static inline __m256i loadu_x_8bit_16x2_avx2(const void *const src,
+ const ptrdiff_t offset) {
+ const __m128i reg0 = _mm_loadu_si128((__m128i *)src);
+ const __m128i reg1 = _mm_loadu_si128((__m128i *)((uint8_t *)src + offset));
+ return _mm256_setr_m128i(reg0, reg1);
+}
+
+static inline __m256i convolve_x_2tap_avx2(const __m256i *data,
+ const __m256i *coeffs) {
+ return _mm256_maddubs_epi16(data[0], coeffs[0]);
+}
+
+static inline void convolve_x_2tap_16x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i *coeffs,
+ __m256i *data) {
+ const __m256i reg0 = loadu_x_8bit_16x2_avx2(src, stride);
+ const __m256i reg1 = loadu_x_8bit_16x2_avx2(src + 1, stride);
+ const __m256i res0 = _mm256_unpacklo_epi8(reg0, reg1);
+ const __m256i res1 = _mm256_unpackhi_epi8(reg0, reg1);
+ data[0] = convolve_x_2tap_avx2(&res0, coeffs);
+ data[1] = convolve_x_2tap_avx2(&res1, coeffs);
+}
+
+static inline void storeu_x_8bit_16x2_ssse3(const __m256i src, void *const dst,
+ const ptrdiff_t offset) {
+ const __m128i reg0 = _mm256_castsi256_si128(src);
+ const __m128i reg1 = _mm256_extracti128_si256(src, 1);
+ _mm_storeu_si128((__m128i *)dst, reg0);
+ _mm_storeu_si128((__m128i *)((uint8_t *)dst + offset), reg1);
+}
+
+static inline void storeu_x_u8_16x2_ssse3(const __m256i src, uint8_t *const dst,
+ const ptrdiff_t stride) {
+ storeu_x_8bit_16x2_ssse3(src, dst, sizeof(*dst) * stride);
+}
+
+static inline void pack_store_x_16x2_avx2(const __m256i data0,
+ const __m256i data1,
+ uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m256i res = _mm256_packus_epi16(data0, data1);
+ storeu_x_u8_16x2_ssse3(res, dst, stride);
+}
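
_mm256_packus_epi16 packs per 128-bit lane: lane L of the result holds the
saturated bytes of data0's lane L followed by data1's lane L. The 16x2
helpers keep row 0 in lane 0 and row 1 in lane 1, with columns 0..7 in data0
and 8..15 in data1, so each lane ends up holding one complete 16-pixel row,
ready for the two-row store above. A per-lane scalar model (a sketch):

    #include <stdint.h>
    // One 128-bit lane of _mm256_packus_epi16 in scalar form.
    static uint8_t sat_u8(int16_t v) {
      return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
    }
    static void packus_lane(const int16_t a[8], const int16_t b[8],
                            uint8_t out[16]) {
      for (int i = 0; i < 8; i++) out[i] = sat_u8(a[i]);
      for (int i = 0; i < 8; i++) out[8 + i] = sat_u8(b[i]);
    }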
+
+static inline void round_pack_store_16x2_avx2(const __m256i *data,
+ uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
+ __m256i reg[2];
+
+ reg[0] = round_sr_x_avx2(data[0]);
+ reg[1] = round_sr_x_avx2(data[1]);
+ pack_store_x_16x2_avx2(reg[0], reg[1], dst, dst_stride);
+}
+
+static inline void convolve_x_2tap_32_avx2(const uint8_t *const src,
+ const __m256i *coeffs,
+ __m256i *data) {
+ const __m256i res0 = _mm256_loadu_si256((__m256i *)src);
+ const __m256i res1 = _mm256_loadu_si256((__m256i *)(src + 1));
+ const __m256i reg0 = _mm256_unpacklo_epi8(res0, res1);
+ const __m256i reg1 = _mm256_unpackhi_epi8(res0, res1);
+
+  data[0] = convolve_x_2tap_avx2(&reg0, coeffs);
+  data[1] = convolve_x_2tap_avx2(&reg1, coeffs);
+}
+
+static inline void pack_store_x_avx2(const __m256i data0, const __m256i data1,
+ uint8_t *const dst) {
+ const __m256i reg = _mm256_packus_epi16(data0, data1);
+ _mm256_storeu_si256((__m256i *)dst, reg);
+}
+
+static inline void round_pack_store_32_avx2(const __m256i *data,
+ uint8_t *const dst) {
+ __m256i reg[2];
+
+ reg[0] = round_sr_x_avx2(data[0]);
+ reg[1] = round_sr_x_avx2(data[1]);
+ pack_store_x_avx2(reg[0], reg[1], dst);
+}
+
+static inline void convolve_round_2tap_32_avx2(const uint8_t *const src,
+ const __m256i *coeffs,
+ uint8_t *const dst) {
+ __m256i data[2];
+
+ convolve_x_2tap_32_avx2(src, coeffs, data);
+ round_pack_store_32_avx2(data, dst);
+}
+
+static inline void load_avg_store_2tap_32_avx2(const uint8_t *const src,
+ uint8_t *const dst) {
+ const __m256i res0 = _mm256_loadu_si256((__m256i *)src);
+ const __m256i res1 = _mm256_loadu_si256((__m256i *)(src + 1));
+ const __m256i data = _mm256_avg_epu8(res0, res1);
+ _mm256_storeu_si256((__m256i *)dst, data);
+}
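
This helper serves the subpel_x_qn == 8 (half-pel) case dispatched in the .c
file below: the halved bilinear taps are then {32, 32}, and
(32*s0 + 32*s1 + 34) >> 6 equals (s0 + s1 + 1) >> 1 for all 8-bit inputs, so
_mm256_avg_epu8 replaces the whole filter-and-round pipeline. A scalar check
(a sketch):

    #include <assert.h>
    // At half-pel, the filtered-and-rounded result equals the unsigned byte
    // average, exhaustively over all 8-bit input pairs.
    static void check_halfpel_avg(void) {
      for (int s0 = 0; s0 < 256; s0++)
        for (int s1 = 0; s1 < 256; s1++)
          assert(((32 * s0 + 32 * s1 + 34) >> 6) == ((s0 + s1 + 1) >> 1));
    }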
+
+static inline __m256i load_convolve_8tap_8x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i *coeffs,
+ const __m256i *flt) {
+ const __m256i res = loadu_x_8bit_16x2_avx2(src, stride);
+ return convolve_lowbd_x(res, coeffs, flt);
+}
+
+static inline void load_convolve_8tap_16x2_avx2(const uint8_t *const src,
+ const int32_t src_stride,
+ const __m256i *coeffs,
+ const __m256i *flt,
+ __m256i *reg) {
+ reg[0] = load_convolve_8tap_8x2_avx2(src + 0, src_stride, coeffs, flt);
+ reg[1] = load_convolve_8tap_8x2_avx2(src + 8, src_stride, coeffs, flt);
+}
+
+static inline void load_convolve_8tap_32_avx2(const uint8_t *const src,
+ const __m256i *coeffs,
+ const __m256i *filt,
+ __m256i *data) {
+ const __m256i reg_0 = _mm256_loadu_si256((__m256i *)src);
+ const __m256i reg_8 = _mm256_loadu_si256((__m256i *)(src + 8));
+
+ data[0] = convolve_lowbd_x(reg_0, coeffs, filt);
+ data[1] = convolve_lowbd_x(reg_8, coeffs, filt);
+}
+
+static inline void load_convolve_round_8tap_32_avx2(const uint8_t *const src,
+ const __m256i *coeffs,
+ const __m256i *filt,
+ uint8_t *const dst) {
+ __m256i data[2];
+
+ load_convolve_8tap_32_avx2(src, coeffs, filt, data);
+ round_pack_store_32_avx2(data, dst);
+}
+
+static inline void load_convolve_6tap_32_avx2(const uint8_t *const src,
+ const __m256i *coeffs,
+ const __m256i *filt,
+ __m256i *data) {
+ const __m256i reg0 = _mm256_loadu_si256((__m256i *)src);
+ const __m256i reg1 = _mm256_loadu_si256((__m256i *)(src + 8));
+
+ data[0] = convolve_lowbd_x_6tap(reg0, coeffs, filt);
+ data[1] = convolve_lowbd_x_6tap(reg1, coeffs, filt);
+}
+
+static inline void convolve_sr_store_6tap_32_avx2(const uint8_t *const src,
+ const __m256i *coeffs,
+ const __m256i *filt,
+ uint8_t *const dst) {
+ __m256i data[2];
+
+ load_convolve_6tap_32_avx2(src, coeffs, filt, data);
+ round_pack_store_32_avx2(data, dst);
+}
+
+static inline __m256i load_convolve_6tap_8x2_avx2(const uint8_t *const src,
+ const ptrdiff_t stride,
+ const __m256i *coeffs,
+ const __m256i *filt) {
+ const __m256i data = loadu_x_8bit_16x2_avx2(src, stride);
+ return convolve_lowbd_x_6tap(data, coeffs, filt);
+}
+
+static inline void load_convolve_6tap_16x2_avx2(const uint8_t *const src,
+ const int32_t src_stride,
+ const __m256i *coeffs,
+ const __m256i *filt,
+ __m256i *data) {
+ data[0] = load_convolve_6tap_8x2_avx2(src + 0, src_stride, coeffs, filt);
+ data[1] = load_convolve_6tap_8x2_avx2(src + 8, src_stride, coeffs, filt);
+}
+
#endif // AOM_AOM_DSP_X86_CONVOLVE_AVX2_H_
diff --git a/av1/common/x86/convolve_avx2.c b/av1/common/x86/convolve_avx2.c
index d250d88427..f6ebc15313 100644
--- a/av1/common/x86/convolve_avx2.c
+++ b/av1/common/x86/convolve_avx2.c
@@ -536,50 +536,61 @@ static inline void av1_convolve_x_sr_general_avx2(
int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
ConvolveParams *conv_params) {
const int bits = FILTER_BITS - conv_params->round_0;
- const __m128i round_shift = _mm_cvtsi32_si128(bits);
- __m256i round_0_const =
- _mm256_set1_epi16((1 << (conv_params->round_0 - 1)) >> 1);
- __m128i round_0_shift = _mm_cvtsi32_si128(conv_params->round_0 - 1);
- __m256i round_const = _mm256_set1_epi16((1 << bits) >> 1);
- int i, horiz_tap = get_filter_tap(filter_params_x, subpel_x_qn);
+ int i, j, horiz_tap = get_filter_tap(filter_params_x, subpel_x_qn);
assert(bits >= 0);
assert((FILTER_BITS - conv_params->round_1) >= 0 ||
((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS));
assert(conv_params->round_0 > 0);
- __m256i coeffs[6], filt[4];
- filt[0] = _mm256_load_si256((__m256i const *)(filt_global_avx2));
- filt[1] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32));
+ assert(horiz_tap == 2 || horiz_tap == 4 || horiz_tap == 6 || horiz_tap == 8 ||
+ horiz_tap == 12);
+  assert(!(w % 2) && (w <= 128));
+ assert((h % 2) == 0);
- if (horiz_tap == 6)
- prepare_coeffs_6t_lowbd(filter_params_x, subpel_x_qn, coeffs);
- else if (horiz_tap == 12) {
- prepare_coeffs_12taps(filter_params_x, subpel_x_qn, coeffs);
- } else {
- prepare_coeffs_lowbd(filter_params_x, subpel_x_qn, coeffs);
- }
+ __m256i coeffs[6] = { 0 }, filt[4] = { 0 };
+ __m128i coeffs_128[4] = { 0 };
+ i = 0;
// horz_filt as 4 tap
if (horiz_tap == 4) {
- const int fo_horiz = 1;
- const uint8_t *const src_ptr = src - fo_horiz;
- if (w <= 8) {
- for (i = 0; i < h; i += 2) {
- const __m256i data = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(
- _mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride]))),
- _mm256_castsi128_si256(_mm_loadu_si128(
- (__m128i *)(&src_ptr[i * src_stride + src_stride]))),
- 0x20);
+ // since fo_horiz = 1
+ const uint8_t *src_ptr = src - 1;
+ if (w == 2) {
+ prepare_coeffs_4t_ssse3(filter_params_x, subpel_x_qn, coeffs_128);
+ do {
+ const __m128i res =
+ convolve_x_4tap_2x2_ssse3(src_ptr, src_stride, coeffs_128);
+ const __m128i reg = round_sr_x_ssse3(res);
+ pack_store_x_2x2_sse2(reg, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ h -= 2;
+ } while (h);
+ } else if (w == 4) {
+ prepare_coeffs_4t_ssse3(filter_params_x, subpel_x_qn, coeffs_128);
+ do {
+ const __m128i reg =
+ convolve_x_4tap_4x2_ssse3(src_ptr, src_stride, coeffs_128);
+ const __m128i res = round_sr_x_ssse3(reg);
+ pack_store_x_4x2_sse2(res, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ h -= 2;
+ } while (h);
+ } else if (w == 8) {
+ prepare_coeffs_lowbd(filter_params_x, subpel_x_qn, coeffs);
+ filt[0] = _mm256_load_si256((__m256i const *)(filt_global_avx2));
+ filt[1] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32));
+ do {
+ const __m256i data = _mm256_setr_m128i(
+ _mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride])),
+ _mm_loadu_si128(
+ (__m128i *)(&src_ptr[i * src_stride + src_stride])));
__m256i res_16b = convolve_lowbd_x_4tap(data, coeffs + 1, filt);
- res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_0_const),
- round_0_shift);
-
- res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
- round_shift);
+ res_16b = round_sr_x_avx2(res_16b);
/* rounding code */
// 8 bit conversion and saturation to uint8
@@ -588,22 +599,18 @@ static inline void av1_convolve_x_sr_general_avx2(
const __m128i res_0 = _mm256_castsi256_si128(res_8b);
const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);
- if (w > 4) {
- _mm_storel_epi64((__m128i *)&dst[i * dst_stride], res_0);
- _mm_storel_epi64((__m128i *)&dst[i * dst_stride + dst_stride], res_1);
- } else if (w > 2) {
- xx_storel_32(&dst[i * dst_stride], res_0);
- xx_storel_32(&dst[i * dst_stride + dst_stride], res_1);
- } else {
- __m128i *const p_0 = (__m128i *)&dst[i * dst_stride];
- __m128i *const p_1 = (__m128i *)&dst[i * dst_stride + dst_stride];
- *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);
- *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);
- }
- }
+ _mm_storel_epi64((__m128i *)&dst[i * dst_stride], res_0);
+ _mm_storel_epi64((__m128i *)&dst[i * dst_stride + dst_stride], res_1);
+ i += 2;
+ } while (i < h);
} else {
- for (i = 0; i < h; ++i) {
- for (int j = 0; j < w; j += 16) {
+ assert(!(w % 16));
+ prepare_coeffs_lowbd(filter_params_x, subpel_x_qn, coeffs);
+ filt[0] = _mm256_load_si256((__m256i const *)(filt_global_avx2));
+ filt[1] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32));
+ do {
+ j = 0;
+ do {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15 16 17
// 18 19 20 21 22 23
const __m256i data = _mm256_inserti128_si256(
@@ -613,11 +620,7 @@ static inline void av1_convolve_x_sr_general_avx2(
__m256i res_16b = convolve_lowbd_x_4tap(data, coeffs + 1, filt);
- res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_0_const),
- round_0_shift);
-
- res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
- round_shift);
+ res_16b = round_sr_x_avx2(res_16b);
/* rounding code */
// 8 bit conversion and saturation to uint8
@@ -628,31 +631,28 @@ static inline void av1_convolve_x_sr_general_avx2(
res_8b = _mm256_permute4x64_epi64(res_8b, 216);
__m128i res = _mm256_castsi256_si128(res_8b);
_mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res);
- }
- }
+ j += 16;
+ } while (j < w);
+ i++;
+ } while (i < h);
}
} else if (horiz_tap == 6) {
- const int fo_horiz = horiz_tap / 2 - 1;
- const uint8_t *const src_ptr = src - fo_horiz;
+ // since (horiz_tap/2 - 1 == 2)
+ const uint8_t *src_ptr = src - 2;
+ prepare_coeffs_6t_lowbd(filter_params_x, subpel_x_qn, coeffs);
+ filt[0] = _mm256_load_si256((__m256i const *)(filt_global_avx2));
+ filt[1] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32));
filt[2] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 2));
- filt[3] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 3));
-
- if (w <= 8) {
- for (i = 0; i < h; i += 2) {
- const __m256i data = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(
- _mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride]))),
- _mm256_castsi128_si256(_mm_loadu_si128(
- (__m128i *)(&src_ptr[i * src_stride + src_stride]))),
- 0x20);
+ if (w == 8) {
+ do {
+ const __m256i data = _mm256_setr_m128i(
+ _mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride])),
+ _mm_loadu_si128(
+ (__m128i *)(&src_ptr[i * src_stride + src_stride])));
__m256i res_16b = convolve_lowbd_x_6tap(data, coeffs, filt);
- res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_0_const),
- round_0_shift);
-
- res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
- round_shift);
+ res_16b = round_sr_x_avx2(res_16b);
/* rounding code */
// 8 bit conversion and saturation to uint8
@@ -660,60 +660,132 @@ static inline void av1_convolve_x_sr_general_avx2(
const __m128i res_0 = _mm256_castsi256_si128(res_8b);
const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);
- if (w > 4) {
- _mm_storel_epi64((__m128i *)&dst[i * dst_stride], res_0);
- _mm_storel_epi64((__m128i *)&dst[i * dst_stride + dst_stride], res_1);
- } else if (w > 2) {
- xx_storel_32(&dst[i * dst_stride], res_0);
- xx_storel_32(&dst[i * dst_stride + dst_stride], res_1);
- } else {
- __m128i *const p_0 = (__m128i *)&dst[i * dst_stride];
- __m128i *const p_1 = (__m128i *)&dst[i * dst_stride + dst_stride];
- *(uint16_t *)p_0 = _mm_cvtsi128_si32(res_0);
- *(uint16_t *)p_1 = _mm_cvtsi128_si32(res_1);
- }
- }
+ _mm_storel_epi64((__m128i *)&dst[i * dst_stride], res_0);
+ _mm_storel_epi64((__m128i *)&dst[i * dst_stride + dst_stride], res_1);
+ i += 2;
+ } while (i < h);
+ } else if (w == 16) {
+ do {
+ __m256i data[2] = { 0 };
+
+ load_convolve_6tap_16x2_avx2(src_ptr, src_stride, coeffs, filt, data);
+ round_pack_store_16x2_avx2(data, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ h -= 2;
+ } while (h);
+ } else if (w == 32) {
+ do {
+ convolve_sr_store_6tap_32_avx2(src_ptr, coeffs, filt, dst);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while ((--h) > 0);
+ } else if (w == 64) {
+ do {
+ convolve_sr_store_6tap_32_avx2(src_ptr, coeffs, filt, dst);
+ convolve_sr_store_6tap_32_avx2(src_ptr + 32, coeffs, filt, dst + 32);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while ((--h) > 0);
} else {
- for (i = 0; i < h; ++i) {
- for (int j = 0; j < w; j += 16) {
- // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15 16 17
- // 18 19 20 21 22 23
- const __m256i data = _mm256_inserti128_si256(
- _mm256_loadu_si256((__m256i *)&src_ptr[(i * src_stride) + j]),
- _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + (j + 8)]),
- 1);
-
- __m256i res_16b = convolve_lowbd_x_6tap(data, coeffs, filt);
+ assert(w == 128);
+
+ do {
+ convolve_sr_store_6tap_32_avx2(src_ptr, coeffs, filt, dst);
+ convolve_sr_store_6tap_32_avx2(src_ptr + SECOND_32_BLK, coeffs, filt,
+ dst + SECOND_32_BLK);
+ convolve_sr_store_6tap_32_avx2(src_ptr + THIRD_32_BLK, coeffs, filt,
+ dst + THIRD_32_BLK);
+ convolve_sr_store_6tap_32_avx2(src_ptr + FOURTH_32_BLK, coeffs, filt,
+ dst + FOURTH_32_BLK);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while ((--h) > 0);
+ }
+ } else if (horiz_tap == 8) {
+ // since (horiz_tap / 2 - 1) == 3
+ const uint8_t *src_ptr = src - 3;
+ prepare_coeffs_lowbd(filter_params_x, subpel_x_qn, coeffs);
+ filt[0] = _mm256_load_si256((__m256i const *)(filt_global_avx2));
+ filt[1] =
+ _mm256_load_si256((__m256i const *)(filt_global_avx2 + SECOND_32_BLK));
+ filt[2] =
+ _mm256_load_si256((__m256i const *)(filt_global_avx2 + THIRD_32_BLK));
+ filt[3] =
+ _mm256_load_si256((__m256i const *)(filt_global_avx2 + FOURTH_32_BLK));
+
+ if (w == 8) {
+ do {
+ const __m256i data = _mm256_setr_m128i(
+ _mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride])),
+ _mm_loadu_si128(
+ (__m128i *)(&src_ptr[i * src_stride + src_stride])));
- res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_0_const),
- round_0_shift);
+ __m256i res_16b = convolve_lowbd_x(data, coeffs, filt);
- res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
- round_shift);
+ res_16b = round_sr_x_avx2(res_16b);
- /* rounding code */
- // 8 bit conversion and saturation to uint8
- __m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
+ /* rounding code */
+ // 8 bit conversion and saturation to uint8
+ __m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
- // Store values into the destination buffer
- // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
- res_8b = _mm256_permute4x64_epi64(res_8b, 216);
- __m128i res = _mm256_castsi256_si128(res_8b);
- _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res);
- }
- }
+ const __m128i res_0 = _mm256_castsi256_si128(res_8b);
+ const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);
+ _mm_storel_epi64((__m128i *)&dst[i * dst_stride], res_0);
+ _mm_storel_epi64((__m128i *)&dst[i * dst_stride + dst_stride], res_1);
+ i += 2;
+ } while (i < h);
+ } else if (w == 16) {
+ do {
+ __m256i data[2] = { 0 };
+
+ load_convolve_8tap_16x2_avx2(src_ptr, src_stride, coeffs, filt, data);
+ round_pack_store_16x2_avx2(data, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ h -= 2;
+ } while (h);
+ } else if (w == 32) {
+ do {
+ load_convolve_round_8tap_32_avx2(src_ptr, coeffs, filt, dst);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while ((--h) > 0);
+ } else if (w == 64) {
+ do {
+ load_convolve_round_8tap_32_avx2(src_ptr, coeffs, filt, dst);
+ load_convolve_round_8tap_32_avx2(src_ptr + 32, coeffs, filt, dst + 32);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while ((--h) > 0);
+ } else {
+ assert(w == 128);
+ do {
+ load_convolve_round_8tap_32_avx2(src_ptr, coeffs, filt, dst);
+ load_convolve_round_8tap_32_avx2(src_ptr + SECOND_32_BLK, coeffs, filt,
+ dst + SECOND_32_BLK);
+ load_convolve_round_8tap_32_avx2(src_ptr + THIRD_32_BLK, coeffs, filt,
+ dst + THIRD_32_BLK);
+ load_convolve_round_8tap_32_avx2(src_ptr + FOURTH_32_BLK, coeffs, filt,
+ dst + FOURTH_32_BLK);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while ((--h) > 0);
}
} else if (horiz_tap == 12) { // horiz_tap == 12
const int fo_horiz = filter_params_x->taps / 2 - 1;
+ prepare_coeffs_12taps(filter_params_x, subpel_x_qn, coeffs);
+ const __m128i round_shift = _mm_cvtsi32_si128(bits);
const uint8_t *const src_ptr = src - fo_horiz;
const __m256i v_zero = _mm256_setzero_si256();
- round_0_const = _mm256_set1_epi32((1 << (conv_params->round_0)) >> 1);
- round_const = _mm256_set1_epi32((1 << bits) >> 1);
- round_0_shift = _mm_cvtsi32_si128(conv_params->round_0);
- __m256i s[6];
+ __m256i round_0_const =
+ _mm256_set1_epi32((1 << (conv_params->round_0)) >> 1);
+ __m256i round_const = _mm256_set1_epi32((1 << bits) >> 1);
+ __m128i round_0_shift = _mm_cvtsi32_si128(conv_params->round_0);
+ __m256i s[6] = { 0 };
if (w <= 4) {
- for (i = 0; i < h; i += 2) {
+ do {
const __m256i data = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride]))),
@@ -780,10 +852,13 @@ static inline void av1_convolve_x_sr_general_avx2(
*(uint16_t *)&dst[i * dst_stride + dst_stride] =
(uint16_t)_mm_cvtsi128_si32(res_1);
}
- }
+ i += 2;
+ } while (i < h);
} else {
- for (i = 0; i < h; i++) {
- for (int j = 0; j < w; j += 8) {
+ assert(!(w % 8));
+ do {
+ j = 0;
+ do {
const __m256i data = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride + j]))),
@@ -826,79 +901,188 @@ static inline void av1_convolve_x_sr_general_avx2(
const __m128i res_1 = _mm256_extracti128_si256(res_8b_lo, 1);
*(int *)&dst[i * dst_stride + j] = _mm_cvtsi128_si32(res_0);
*(int *)&dst[i * dst_stride + j + 4] = _mm_cvtsi128_si32(res_1);
- }
- }
+
+ j += 8;
+ } while (j < w);
+ i++;
+ } while (i < h);
}
} else {
- const int fo_horiz = filter_params_x->taps / 2 - 1;
- const uint8_t *const src_ptr = src - fo_horiz;
- filt[2] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 2));
- filt[3] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 3));
-
- if (w <= 8) {
- for (i = 0; i < h; i += 2) {
- const __m256i data = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(
- _mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride]))),
- _mm256_castsi128_si256(_mm_loadu_si128(
- (__m128i *)(&src_ptr[i * src_stride + src_stride]))),
- 0x20);
-
- __m256i res_16b = convolve_lowbd_x(data, coeffs, filt);
-
- res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_0_const),
- round_0_shift);
-
- res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
- round_shift);
-
- /* rounding code */
- // 8 bit conversion and saturation to uint8
- __m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
-
- const __m128i res_0 = _mm256_castsi256_si128(res_8b);
- const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);
- if (w > 4) {
- _mm_storel_epi64((__m128i *)&dst[i * dst_stride], res_0);
- _mm_storel_epi64((__m128i *)&dst[i * dst_stride + dst_stride], res_1);
- } else if (w > 2) {
- xx_storel_32(&dst[i * dst_stride], res_0);
- xx_storel_32(&dst[i * dst_stride + dst_stride], res_1);
+ assert(horiz_tap == 2);
+ // since (filter_params_x->taps / 2 - 1) == 0
+ const uint8_t *src_ptr = src;
+ if (subpel_x_qn != 8) {
+ if (w <= 8) {
+ prepare_coeffs_2t_ssse3(filter_params_x, subpel_x_qn, coeffs_128);
+
+ if (w == 2) {
+ do {
+ const __m128i data =
+ convolve_x_2tap_2x2_ssse3(src_ptr, src_stride, coeffs_128);
+ const __m128i reg = round_sr_x_ssse3(data);
+ pack_store_x_2x2_sse2(reg, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ h -= 2;
+ } while (h);
+ } else if (w == 4) {
+ do {
+ const __m128i data =
+ convolve_x_2tap_4x2_ssse3(src_ptr, src_stride, coeffs_128);
+ const __m128i reg = round_sr_x_ssse3(data);
+            pack_store_x_4x2_sse2(reg, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ h -= 2;
+ } while (h);
+ } else {
+ assert(w == 8);
+
+ do {
+ __m128i data[2] = { 0 };
+
+ convolve_x_2tap_8x2_ssse3(src_ptr, src_stride, coeffs_128, data);
+ data[0] = round_sr_x_ssse3(data[0]);
+ data[1] = round_sr_x_ssse3(data[1]);
+ const __m128i reg = _mm_packus_epi16(data[0], data[1]);
+ _mm_storel_epi64((__m128i *)dst, reg);
+ _mm_storeh_epi64((__m128i *)(dst + dst_stride), reg);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ h -= 2;
+ } while (h);
+ }
+ } else {
+ prepare_coeffs_2t_lowbd(filter_params_x, subpel_x_qn, coeffs);
+
+ if (w == 16) {
+ do {
+ __m256i data[2] = { 0 };
+
+ convolve_x_2tap_16x2_avx2(src_ptr, src_stride, coeffs, data);
+ round_pack_store_16x2_avx2(data, dst, dst_stride);
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ h -= 2;
+ } while (h);
+ } else if (w == 32) {
+ do {
+ convolve_round_2tap_32_avx2(src_ptr, coeffs, dst);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while ((--h) > 0);
+ } else if (w == 64) {
+ do {
+ convolve_round_2tap_32_avx2(src_ptr, coeffs, dst);
+ convolve_round_2tap_32_avx2(src_ptr + SECOND_32_BLK, coeffs,
+ dst + SECOND_32_BLK);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while ((--h) > 0);
} else {
- __m128i *const p_0 = (__m128i *)&dst[i * dst_stride];
- __m128i *const p_1 = (__m128i *)&dst[i * dst_stride + dst_stride];
- *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);
- *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);
+ assert(w == 128);
+
+ do {
+ convolve_round_2tap_32_avx2(src_ptr, coeffs, dst);
+ convolve_round_2tap_32_avx2(src_ptr + (SECOND_32_BLK), coeffs,
+ dst + (SECOND_32_BLK));
+ convolve_round_2tap_32_avx2(src_ptr + (THIRD_32_BLK), coeffs,
+ dst + (THIRD_32_BLK));
+ convolve_round_2tap_32_avx2(src_ptr + (FOURTH_32_BLK), coeffs,
+ dst + (FOURTH_32_BLK));
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while ((--h) > 0);
}
}
} else {
- for (i = 0; i < h; ++i) {
- for (int j = 0; j < w; j += 16) {
- // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15 16 17
- // 18 19 20 21 22 23
- const __m256i data = _mm256_inserti128_si256(
- _mm256_loadu_si256((__m256i *)&src_ptr[(i * src_stride) + j]),
- _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + (j + 8)]),
- 1);
-
- __m256i res_16b = convolve_lowbd_x(data, coeffs, filt);
-
- res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_0_const),
- round_0_shift);
-
- res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
- round_shift);
-
- /* rounding code */
- // 8 bit conversion and saturation to uint8
- __m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
-
- // Store values into the destination buffer
- // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
- res_8b = _mm256_permute4x64_epi64(res_8b, 216);
- __m128i res = _mm256_castsi256_si128(res_8b);
- _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res);
- }
+ if (w == 2) {
+ do {
+ __m128i data = load_x_u8_4x2_sse4(src_ptr, src_stride);
+ const __m128i reg1 = _mm_srli_si128(data, 1);
+ const __m128i reg2 = _mm_avg_epu8(data, reg1);
+ *(uint16_t *)dst = (uint16_t)_mm_cvtsi128_si32(reg2);
+ *(uint16_t *)(dst + dst_stride) = _mm_extract_epi16(reg2, 2);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ h -= 2;
+ } while (h);
+ } else if (w == 4) {
+ do {
+ __m128i data = load_8bit_8x2_to_1_reg_sse2(
+ src_ptr, (int)(sizeof(*src_ptr) * src_stride));
+ const __m128i reg1 = _mm_srli_si128(data, 1);
+ const __m128i reg2 = _mm_avg_epu8(data, reg1);
+ xx_storel_32(dst, reg2);
+ *(int32_t *)(dst + dst_stride) = _mm_extract_epi32(reg2, 2);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ h -= 2;
+ } while (h);
+ } else if (w == 8) {
+ do {
+ const __m128i data00 = _mm_loadu_si128((__m128i *)src_ptr);
+ const __m128i data10 =
+ _mm_loadu_si128((__m128i *)(src_ptr + src_stride));
+ const __m128i data01 = _mm_srli_si128(data00, 1);
+ const __m128i data11 = _mm_srli_si128(data10, 1);
+ const __m128i reg0 = _mm_avg_epu8(data00, data01);
+ const __m128i reg1 = _mm_avg_epu8(data10, data11);
+ _mm_storel_epi64((__m128i *)dst, reg0);
+ _mm_storel_epi64((__m128i *)(dst + dst_stride), reg1);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ h -= 2;
+ } while (h);
+ } else if (w == 16) {
+ do {
+ const __m128i data00 = _mm_loadu_si128((__m128i *)src_ptr);
+ const __m128i data01 = _mm_loadu_si128((__m128i *)(src_ptr + 1));
+ const __m128i data10 =
+ _mm_loadu_si128((__m128i *)(src_ptr + src_stride));
+ const __m128i data11 =
+ _mm_loadu_si128((__m128i *)(src_ptr + src_stride + 1));
+ const __m128i reg0 = _mm_avg_epu8(data00, data01);
+ const __m128i reg1 = _mm_avg_epu8(data10, data11);
+ _mm_storeu_si128((__m128i *)dst, reg0);
+ _mm_storeu_si128((__m128i *)(dst + dst_stride), reg1);
+
+ src_ptr += 2 * src_stride;
+ dst += 2 * dst_stride;
+ h -= 2;
+ } while (h);
+ } else if (w == 32) {
+ do {
+ load_avg_store_2tap_32_avx2(src_ptr, dst);
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while ((--h) > 0);
+ } else if (w == 64) {
+ do {
+ load_avg_store_2tap_32_avx2(src_ptr, dst);
+ load_avg_store_2tap_32_avx2(src_ptr + (SECOND_32_BLK),
+ dst + (SECOND_32_BLK));
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while ((--h) > 0);
+ } else {
+ assert(w == 128);
+
+ do {
+ load_avg_store_2tap_32_avx2(src_ptr, dst);
+ load_avg_store_2tap_32_avx2(src_ptr + (SECOND_32_BLK),
+ dst + (SECOND_32_BLK));
+ load_avg_store_2tap_32_avx2(src_ptr + (THIRD_32_BLK),
+ dst + (THIRD_32_BLK));
+ load_avg_store_2tap_32_avx2(src_ptr + (FOURTH_32_BLK),
+ dst + (FOURTH_32_BLK));
+ src_ptr += src_stride;
+ dst += dst_stride;
+ } while ((--h) > 0);
}
}
}
@@ -910,19 +1094,6 @@ void av1_convolve_x_sr_avx2(const uint8_t *src, int32_t src_stride,
const InterpFilterParams *filter_params_x,
const int32_t subpel_x_qn,
ConvolveParams *conv_params) {
-#if CONFIG_SVT_AV1
- const int horz_tap = get_filter_tap(filter_params_x, subpel_x_qn);
-
- if (horz_tap == 12) {
- av1_convolve_x_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
- filter_params_x, subpel_x_qn, conv_params);
- } else {
- av1_convolve_x_sr_specialized_avx2(src, src_stride, dst, dst_stride, w, h,
- filter_params_x, subpel_x_qn,
- conv_params);
- }
-#else
av1_convolve_x_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
filter_params_x, subpel_x_qn, conv_params);
-#endif
}