Commit c501b07be3 for aom
commit c501b07be392027178f0a522bcc14836ea8e6f06
Author: Satheesh Kumar <satheesh.kumar@ittiam.com>
Date: Wed Mar 11 19:54:28 2026 +0530
Improve av1_convolve_y_sr_general_avx2()
This patch introduces the following optimizations:
- Added specialized width-wise handling for 2-tap filtering, with a
separate path for subpel_y_qn == 8
- Improved the existing 4/6/8-tap filtering with width-based
optimizations
The scaling of av1_convolve_y_sr_avx2() w.r.t. the existing
implementation is given below:
BlockSize   2tap   4tap   6tap   8tap
2x2         1.07   0.95      -      -
4x4         1.00   0.96      -      -
8x8         0.91   0.91   0.98   0.94
16x16       0.96   0.97   0.95   1.03
32x32       0.98   0.99   1.44   1.67
64x64       1.00   0.98   1.45   1.71
128x128     0.98   0.98   1.48   1.71
Change-Id: Ie2fb8a1a26652d748351a6b3bd6edb5d4513c0bd
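
For reference, the subpel_y_qn == 8 shortcut exploited below follows from
the kernel values: at half-pel, AV1's bilinear filter is {64, 64} with
FILTER_BITS == 7, so the 2-tap convolution reduces to a rounded average.
A minimal scalar sketch of this identity (the helper name is illustrative,
not part of the patch):

    // out = (64 * a + 64 * b + 64) >> 7  ==  (a + b + 1) >> 1,
    // which is exactly what _mm_avg_epu8()/_mm256_avg_epu8() compute.
    static inline uint8_t convolve_y_2tap_halfpel_scalar(uint8_t a,
                                                         uint8_t b) {
      return (uint8_t)((a + b + 1) >> 1);
    }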
diff --git a/aom_dsp/x86/convolve_avx2.h b/aom_dsp/x86/convolve_avx2.h
index 76a6ca0c55..ffafa13319 100644
--- a/aom_dsp/x86/convolve_avx2.h
+++ b/aom_dsp/x86/convolve_avx2.h
@@ -715,7 +715,7 @@ static inline void sr_2d_ver_round_and_store(__m256i res_a, __m256i res_b,
static inline void prepare_coeffs_2t_ssse3(
const InterpFilterParams *const filter_params, const int32_t subpel_q4,
- __m128i *const coeffs /* [2] */) {
+ __m128i *const coeffs /* [4] */) {
const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
filter_params, subpel_q4 & SUBPEL_MASK);
const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
@@ -736,7 +736,7 @@ static inline void prepare_coeffs_2t_ssse3(
static inline void prepare_coeffs_4t_ssse3(
const InterpFilterParams *const filter_params, const int32_t subpel_q4,
- __m128i *const coeffs /* [2] */) {
+ __m128i *const coeffs /* [4] */) {
const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
filter_params, subpel_q4 & SUBPEL_MASK);
const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
@@ -757,6 +757,58 @@ static inline void prepare_coeffs_4t_ssse3(
coeffs[1] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0a08u));
}
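+
+// The helpers below halve the 16-bit filter taps (all AV1 taps are even,
+// as the asserts check) so each fits in a signed byte, then broadcast
+// adjacent tap pairs so _mm_maddubs_epi16() can multiply-accumulate two
+// interleaved source rows per 16-bit lane in a single instruction.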
+static inline void prepare_coeffs_6t_ssse3(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m128i *const coeffs /* [4] */) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+ const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
+
+ // Right shift all filter coefficients by 1 to reduce the bits required.
+ // This extra right shift will be taken care of at the end while rounding
+ // the result.
+ // Since all filter coefficients are even, this change will not affect
+ // the end result.
+ assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
+ _mm_set1_epi16((short)0xffff)));
+
+ const __m128i coeffs_1 = _mm_srai_epi16(coeffs_8, 1);
+
+ // coeffs 1 2 1 2 1 2 1 2
+ coeffs[0] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0402u));
+ // coeffs 3 4 3 4 3 4 3 4
+ coeffs[1] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0806u));
+ // coeffs 5 6 5 6 5 6 5 6
+ coeffs[2] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0c0au));
+}
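+
+// Note: a 6-tap kernel occupies elements 1..6 of the 8-wide filter array
+// (zeros at 0 and 7), which is why the pairs above start at element 1.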
+
+static inline void prepare_coeffs_ssse3(
+ const InterpFilterParams *const filter_params, const int32_t subpel_q4,
+ __m128i *const coeffs /* [4] */) {
+ const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+ filter_params, subpel_q4 & SUBPEL_MASK);
+ const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
+
+ // Right shift all filter coefficients by 1 to reduce the bits required.
+ // This extra right shift will be taken care of at the end while rounding
+ // the result.
+ // Since all filter coefficients are even, this change will not affect
+ // the end result.
+ assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
+ _mm_set1_epi16((short)0xffff)));
+
+ const __m128i coeffs_1 = _mm_srai_epi16(coeffs_8, 1);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ coeffs[0] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0200u));
+ // coeffs 2 3 2 3 2 3 2 3
+ coeffs[1] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0604u));
+ // coeffs 4 5 4 5 4 5 4 5
+ coeffs[2] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0a08u));
+ // coeffs 6 7 6 7 6 7 6 7
+ coeffs[3] = _mm_shuffle_epi8(coeffs_1, _mm_set1_epi16(0x0e0cu));
+}
+
static inline void prepare_coeffs_2t_lowbd(
const InterpFilterParams *const filter_params, const int subpel_q4,
__m256i *const coeffs /* [4] */) {
@@ -952,6 +1004,30 @@ static inline __m128i convolve_lowbd_4tap_ssse3(const __m128i ss[2],
return _mm_add_epi16(res_01, res_23);
}
+static inline __m128i convolve_lowbd_6tap_ssse3(const __m128i ss[3],
+ const __m128i coeffs[3]) {
+ const __m128i res_01 = _mm_maddubs_epi16(ss[0], coeffs[0]);
+ const __m128i res_23 = _mm_maddubs_epi16(ss[1], coeffs[1]);
+ const __m128i res_45 = _mm_maddubs_epi16(ss[2], coeffs[2]);
+
+ const __m128i res = _mm_add_epi16(_mm_add_epi16(res_01, res_45), res_23);
+
+ return res;
+}
+
+static inline __m128i convolve_lowbd_ssse3(const __m128i ss[4],
+ const __m128i coeffs[4]) {
+ const __m128i res_01 = _mm_maddubs_epi16(ss[0], coeffs[0]);
+ const __m128i res_23 = _mm_maddubs_epi16(ss[1], coeffs[1]);
+ const __m128i res_45 = _mm_maddubs_epi16(ss[2], coeffs[2]);
+ const __m128i res_67 = _mm_maddubs_epi16(ss[3], coeffs[3]);
+
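+ // Pair a small-magnitude product with a large-magnitude one in each
+ // addition so the intermediate 16-bit sums cannot overflow.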
+ const __m128i res = _mm_add_epi16(_mm_add_epi16(res_01, res_45),
+ _mm_add_epi16(res_23, res_67));
+
+ return res;
+}
+
static inline __m256i convolve_lowbd(const __m256i *const s,
const __m256i *const coeffs) {
const __m256i res_01 = _mm256_maddubs_epi16(s[0], coeffs[0]);
@@ -1210,17 +1286,17 @@ static inline __m128i round_sr_x_ssse3(const __m128i data) {
return _mm_srai_epi16(reg, 6);
}
-static inline void store_x_u8_4x2_sse2(const __m128i reg, uint8_t *const dst,
+static inline void store_8bit_4x2_sse2(const __m128i reg, uint8_t *const dst,
const ptrdiff_t dst_stride) {
xx_storel_32(dst, reg);
*(uint32_t *)(dst + dst_stride) =
((uint32_t)_mm_extract_epi16(reg, 3) << 16) | _mm_extract_epi16(reg, 2);
}
-static inline void pack_store_x_4x2_sse2(const __m128i reg, uint8_t *const dst,
- const ptrdiff_t dst_stride) {
+static inline void pack_store_u8_4x2_sse2(const __m128i reg, uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
const __m128i reg_pack = _mm_packus_epi16(reg, reg);
- store_x_u8_4x2_sse2(reg_pack, dst, dst_stride);
+ store_8bit_4x2_sse2(reg_pack, dst, dst_stride);
}
static inline __m128i convolve_x_4tap_2x2_ssse3(const uint8_t *const src,
@@ -1237,8 +1313,8 @@ static inline __m128i convolve_x_4tap_2x2_ssse3(const uint8_t *const src,
return convolve_lowbd_4tap_ssse3(data, coeffs);
}
-static inline void pack_store_x_2x2_sse2(const __m128i reg, uint8_t *const dst,
- const ptrdiff_t dst_stride) {
+static inline void pack_store_u8_2x2_sse2(const __m128i reg, uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
const __m128i data = _mm_packus_epi16(reg, reg);
*(int16_t *)dst = (int16_t)_mm_cvtsi128_si32(data);
*(int16_t *)(dst + dst_stride) = (int16_t)_mm_extract_epi16(data, 1);
@@ -1319,25 +1395,33 @@ static inline void convolve_x_2tap_16x2_avx2(const uint8_t *const src,
data[1] = convolve_x_2tap_avx2(&res1, coeffs);
}
-static inline void storeu_x_8bit_16x2_ssse3(const __m256i src, void *const dst,
- const ptrdiff_t offset) {
+static inline void store_u8_16x2_avx2(const __m256i src, uint8_t *const dst,
+ const ptrdiff_t stride) {
const __m128i reg0 = _mm256_castsi256_si128(src);
const __m128i reg1 = _mm256_extracti128_si256(src, 1);
_mm_storeu_si128((__m128i *)dst, reg0);
- _mm_storeu_si128((__m128i *)((uint8_t *)dst + offset), reg1);
+ _mm_storeu_si128((__m128i *)(dst + stride), reg1);
}
-static inline void storeu_x_u8_16x2_ssse3(const __m256i src, uint8_t *const dst,
- const ptrdiff_t stride) {
- storeu_x_8bit_16x2_ssse3(src, dst, sizeof(*dst) * stride);
+static inline void store_u8_8x2_avx2(const __m256i src, uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m128i reg0 = _mm256_castsi256_si128(src);
+ const __m128i reg1 = _mm256_extracti128_si256(src, 1);
+ _mm_storel_epi64((__m128i *)dst, reg0);
+ _mm_storel_epi64((__m128i *)(dst + stride), reg1);
}
-static inline void pack_store_x_16x2_avx2(const __m256i data0,
- const __m256i data1,
- uint8_t *const dst,
- const ptrdiff_t stride) {
+static inline void pack_store_16x2_avx2(const __m256i data0,
+ const __m256i data1, uint8_t *const dst,
+ const ptrdiff_t stride) {
const __m256i res = _mm256_packus_epi16(data0, data1);
- storeu_x_u8_16x2_ssse3(res, dst, stride);
+ store_u8_16x2_avx2(res, dst, stride);
+}
+
+static inline void pack_store_8x2_avx2(const __m256i data, uint8_t *const dst,
+ const ptrdiff_t stride) {
+ const __m256i res = _mm256_packus_epi16(data, data);
+ store_u8_8x2_avx2(res, dst, stride);
}
static inline void round_pack_store_16x2_avx2(const __m256i *data,
@@ -1347,7 +1431,7 @@ static inline void round_pack_store_16x2_avx2(const __m256i *data,
reg[0] = round_sr_x_avx2(data[0]);
reg[1] = round_sr_x_avx2(data[1]);
- pack_store_x_16x2_avx2(reg[0], reg[1], dst, dst_stride);
+ pack_store_16x2_avx2(reg[0], reg[1], dst, dst_stride);
}
static inline void convolve_x_2tap_32_avx2(const uint8_t *const src,
@@ -1362,8 +1446,8 @@ static inline void convolve_x_2tap_32_avx2(const uint8_t *const src,
data[1] = convolve_x_2tap_avx2(&reg1, coeffs);
}
-static inline void pack_store_x_avx2(const __m256i data0, const __m256i data1,
- uint8_t *const dst) {
+static inline void pack_store_32_avx2(const __m256i data0, const __m256i data1,
+ uint8_t *const dst) {
const __m256i reg = _mm256_packus_epi16(data0, data1);
_mm256_storeu_si256((__m256i *)dst, reg);
}
@@ -1374,7 +1458,7 @@ static inline void round_pack_store_32_avx2(const __m256i *data,
reg[0] = round_sr_x_avx2(data[0]);
reg[1] = round_sr_x_avx2(data[1]);
- pack_store_x_avx2(reg[0], reg[1], dst);
+ pack_store_32_avx2(reg[0], reg[1], dst);
}
static inline void convolve_round_2tap_32_avx2(const uint8_t *const src,
@@ -1470,4 +1554,329 @@ static inline void load_convolve_6tap_16x2_avx2(const uint8_t *const src,
data[1] = load_convolve_6tap_8x2_avx2(src + 8, src_stride, coeffs, filt);
}
+static inline __m128i round_sr_y_ssse3(const __m128i data) {
+ const __m128i value = _mm_set1_epi16(32);
+ const __m128i reg = _mm_add_epi16(data, value);
+ return _mm_srai_epi16(reg, FILTER_BITS - 1);
+}
+
+static inline __m256i round_sr_y_avx2(const __m256i data) {
+ const __m256i value = _mm256_set1_epi16(32);
+ const __m256i reg = _mm256_add_epi16(data, value);
+ return _mm256_srai_epi16(reg, FILTER_BITS - 1);
+}
+
+static inline void round_pack_store_y_8x2_avx2(const __m256i res,
+ uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
+ __m256i r;
+
+ r = round_sr_y_avx2(res);
+ pack_store_8x2_avx2(r, dst, dst_stride);
+}
+
+static inline void round_pack_store_y_16x2_avx2(const __m256i res[2],
+ uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
+ __m256i r[2];
+
+ r[0] = round_sr_y_avx2(res[0]);
+ r[1] = round_sr_y_avx2(res[1]);
+ pack_store_16x2_avx2(r[0], r[1], dst, dst_stride);
+}
+
+static inline void round_pack_store_y_32_avx2(const __m256i res[2],
+ uint8_t *const dst) {
+ __m256i r[2];
+
+ r[0] = round_sr_y_avx2(res[0]);
+ r[1] = round_sr_y_avx2(res[1]);
+ pack_store_32_avx2(r[0], r[1], dst);
+}
+
+static inline void round_pack_store_y_32x2_avx2(const __m256i res[4],
+ uint8_t *const dst,
+ const ptrdiff_t dst_stride) {
+ round_pack_store_y_32_avx2(res, dst);
+ round_pack_store_y_32_avx2(res + 2, dst + dst_stride);
+}
+
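+// The convolve_y_* helpers below keep previously loaded rows in d[] and
+// previously interleaved row pairs in s[] across loop iterations, so each
+// two-row step loads only the two newest source rows; the newest row is
+// written back over the oldest slot, making the buffers ring-like.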
+static inline void convolve_y_2tap_2x2_ssse3(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m128i *coeffs,
+ __m128i d[2], __m128i *res) {
+ d[1] = _mm_cvtsi32_si128(loadu_int16(data + 1 * stride));
+ const __m128i src_01a = _mm_unpacklo_epi16(d[0], d[1]);
+ d[0] = _mm_cvtsi32_si128(loadu_int16(data + 2 * stride));
+ const __m128i src_12a = _mm_unpacklo_epi16(d[1], d[0]);
+
+ const __m128i s = _mm_unpacklo_epi8(src_01a, src_12a);
+
+ *res = _mm_maddubs_epi16(s, coeffs[0]);
+}
+
+static inline void convolve_y_4tap_2x2_ssse3(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m128i coeffs[2],
+ __m128i d[4], __m128i s[2],
+ __m128i *res) {
+ d[3] = _mm_cvtsi32_si128(loadu_int16(data + 3 * stride));
+ const __m128i src_23a = _mm_unpacklo_epi16(d[2], d[3]);
+ d[2] = _mm_cvtsi32_si128(loadu_int16(data + 4 * stride));
+ const __m128i src_34a = _mm_unpacklo_epi16(d[3], d[2]);
+
+ s[1] = _mm_unpacklo_epi8(src_23a, src_34a);
+
+ *res = convolve_lowbd_4tap_ssse3(s, coeffs);
+}
+
+static inline void convolve_y_6tap_2x2_ssse3(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m128i coeffs[3],
+ __m128i d[6], __m128i s[3],
+ __m128i *res) {
+ d[5] = _mm_cvtsi32_si128(loadu_int16(data + 5 * stride));
+ const __m128i src_45a = _mm_unpacklo_epi16(d[4], d[5]);
+ d[4] = _mm_cvtsi32_si128(loadu_int16(data + 6 * stride));
+ const __m128i src_56a = _mm_unpacklo_epi16(d[5], d[4]);
+
+ s[2] = _mm_unpacklo_epi8(src_45a, src_56a);
+
+ *res = convolve_lowbd_6tap_ssse3(s, coeffs);
+}
+
+static inline void convolve_y_8tap_2x2_ssse3(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m128i coeffs[4],
+ __m128i d[8], __m128i s[4],
+ __m128i *res) {
+ d[7] = _mm_cvtsi32_si128(loadu_int16(data + 7 * stride));
+ const __m128i src_67a = _mm_unpacklo_epi16(d[6], d[7]);
+ d[6] = _mm_cvtsi32_si128(loadu_int16(data + 8 * stride));
+ const __m128i src_78a = _mm_unpacklo_epi16(d[7], d[6]);
+
+ s[3] = _mm_unpacklo_epi8(src_67a, src_78a);
+
+ *res = convolve_lowbd_ssse3(s, coeffs);
+}
+
+static inline void convolve_y_2tap_4x2_ssse3(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m128i *coeffs,
+ __m128i d[2], __m128i *res) {
+ d[1] = _mm_cvtsi32_si128(loadu_int32(data + 1 * stride));
+ const __m128i src_01a = _mm_unpacklo_epi32(d[0], d[1]);
+ d[0] = _mm_cvtsi32_si128(loadu_int32(data + 2 * stride));
+ const __m128i src_12a = _mm_unpacklo_epi32(d[1], d[0]);
+
+ const __m128i s = _mm_unpacklo_epi8(src_01a, src_12a);
+
+ *res = _mm_maddubs_epi16(s, coeffs[0]);
+}
+
+static inline void convolve_y_4tap_4x2_ssse3(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m128i coeffs[2],
+ __m128i d[4], __m128i s[2],
+ __m128i *res) {
+ d[3] = _mm_cvtsi32_si128(loadu_int32(data + 3 * stride));
+ const __m128i src_23a = _mm_unpacklo_epi32(d[2], d[3]);
+ d[2] = _mm_cvtsi32_si128(loadu_int32(data + 4 * stride));
+ const __m128i src_34a = _mm_unpacklo_epi32(d[3], d[2]);
+
+ s[1] = _mm_unpacklo_epi8(src_23a, src_34a);
+
+ *res = convolve_lowbd_4tap_ssse3(s, coeffs);
+}
+
+static inline void convolve_y_6tap_4x2_ssse3(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m128i coeffs[3],
+ __m128i d[6], __m128i s[3],
+ __m128i *res) {
+ d[5] = _mm_cvtsi32_si128(loadu_int32(data + 5 * stride));
+ const __m128i src_45a = _mm_unpacklo_epi32(d[4], d[5]);
+ d[4] = _mm_cvtsi32_si128(loadu_int32(data + 6 * stride));
+ const __m128i src_56a = _mm_unpacklo_epi32(d[5], d[4]);
+
+ s[2] = _mm_unpacklo_epi8(src_45a, src_56a);
+
+ *res = convolve_lowbd_6tap_ssse3(s, coeffs);
+}
+
+static inline void convolve_y_8tap_4x2_ssse3(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m128i coeffs[4],
+ __m128i d[8], __m128i s[4],
+ __m128i *res) {
+ d[7] = _mm_cvtsi32_si128(loadu_int32(data + 7 * stride));
+ const __m128i src_67a = _mm_unpacklo_epi32(d[6], d[7]);
+ d[6] = _mm_cvtsi32_si128(loadu_int32(data + 8 * stride));
+ const __m128i src_78a = _mm_unpacklo_epi32(d[7], d[6]);
+
+ s[3] = _mm_unpacklo_epi8(src_67a, src_78a);
+
+ *res = convolve_lowbd_ssse3(s, coeffs);
+}
+
+static inline void convolve_y_2tap_8x2_avx2(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m256i *coeffs, __m128i d[2],
+ __m256i *res) {
+ d[1] = _mm_loadu_si128((__m128i *)(data + 1 * stride));
+ const __m256i src_01a = _mm256_setr_m128i(d[0], d[1]);
+ d[0] = _mm_loadu_si128((__m128i *)(data + 2 * stride));
+ const __m256i src_12a = _mm256_setr_m128i(d[1], d[0]);
+
+ const __m256i s = _mm256_unpacklo_epi8(src_01a, src_12a);
+
+ *res = _mm256_maddubs_epi16(s, coeffs[0]);
+}
+
+static inline void convolve_y_4tap_8x2_avx2(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m256i coeffs[2],
+ __m128i d[4], __m256i s[2],
+ __m256i *res) {
+ d[3] = _mm_loadu_si128((__m128i *)(data + 3 * stride));
+ const __m256i src_23a = _mm256_setr_m128i(d[2], d[3]);
+ d[2] = _mm_loadu_si128((__m128i *)(data + 4 * stride));
+ const __m256i src_34a = _mm256_setr_m128i(d[3], d[2]);
+
+ s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);
+
+ *res = convolve_lowbd_4tap(s, coeffs);
+}
+
+static inline void convolve_y_6tap_8x2_avx2(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m256i coeffs[3],
+ __m128i d[6], __m256i s[3],
+ __m256i *res) {
+ d[5] = _mm_loadu_si128((__m128i *)(data + 5 * stride));
+ const __m256i src_45a = _mm256_setr_m128i(d[4], d[5]);
+ d[4] = _mm_loadu_si128((__m128i *)(data + 6 * stride));
+ const __m256i src_56a = _mm256_setr_m128i(d[5], d[4]);
+
+ s[2] = _mm256_unpacklo_epi8(src_45a, src_56a);
+
+ *res = convolve_lowbd_6tap(s, coeffs);
+}
+
+static inline void convolve_y_8tap_8x2_avx2(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m256i coeffs[4],
+ __m128i d[8], __m256i s[4],
+ __m256i *res) {
+ d[7] = _mm_loadu_si128((__m128i *)(data + 7 * stride));
+ const __m256i src_67a = _mm256_setr_m128i(d[6], d[7]);
+ d[6] = _mm_loadu_si128((__m128i *)(data + 8 * stride));
+ const __m256i src_78a = _mm256_setr_m128i(d[7], d[6]);
+
+ s[3] = _mm256_unpacklo_epi8(src_67a, src_78a);
+
+ *res = convolve_lowbd(s, coeffs);
+}
+
+static inline void convolve_y_2tap_16x2_avx2(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m256i *coeffs,
+ __m128i d[2], __m256i res[2]) {
+ d[1] = _mm_loadu_si128((__m128i *)(data + 1 * stride));
+ const __m256i src_01a = _mm256_setr_m128i(d[0], d[1]);
+ d[0] = _mm_loadu_si128((__m128i *)(data + 2 * stride));
+ const __m256i src_12a = _mm256_setr_m128i(d[1], d[0]);
+
+ const __m256i s0 = _mm256_unpacklo_epi8(src_01a, src_12a);
+ const __m256i s1 = _mm256_unpackhi_epi8(src_01a, src_12a);
+
+ res[0] = _mm256_maddubs_epi16(s0, coeffs[0]);
+ res[1] = _mm256_maddubs_epi16(s1, coeffs[0]);
+}
+
+static inline void convolve_y_4tap_16x2_avx2(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m256i coeffs[2],
+ __m128i d[4], __m256i s[4],
+ __m256i res[2]) {
+ d[3] = _mm_loadu_si128((__m128i *)(data + 3 * stride));
+ const __m256i src_23a = _mm256_setr_m128i(d[2], d[3]);
+ d[2] = _mm_loadu_si128((__m128i *)(data + 4 * stride));
+ const __m256i src_34a = _mm256_setr_m128i(d[3], d[2]);
+
+ s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);
+ s[3] = _mm256_unpackhi_epi8(src_23a, src_34a);
+
+ res[0] = convolve_lowbd_4tap(s, coeffs);
+ res[1] = convolve_lowbd_4tap(s + 2, coeffs);
+}
+
+static inline void convolve_y_6tap_16x2_avx2(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m256i coeffs[3],
+ __m128i d[6], __m256i s[6],
+ __m256i res[2]) {
+ d[5] = _mm_loadu_si128((__m128i *)(data + 5 * stride));
+ const __m256i src_45a = _mm256_setr_m128i(d[4], d[5]);
+ d[4] = _mm_loadu_si128((__m128i *)(data + 6 * stride));
+ const __m256i src_56a = _mm256_setr_m128i(d[5], d[4]);
+
+ s[2] = _mm256_unpacklo_epi8(src_45a, src_56a);
+ s[5] = _mm256_unpackhi_epi8(src_45a, src_56a);
+
+ res[0] = convolve_lowbd_6tap(s, coeffs);
+ res[1] = convolve_lowbd_6tap(s + 3, coeffs);
+}
+
+static inline void convolve_y_8tap_16x2_avx2(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m256i coeffs[4],
+ __m128i d[8], __m256i s[8],
+ __m256i res[2]) {
+ d[7] = _mm_loadu_si128((__m128i *)(data + 7 * stride));
+ const __m256i src_67a = _mm256_setr_m128i(d[6], d[7]);
+ d[6] = _mm_loadu_si128((__m128i *)(data + 8 * stride));
+ const __m256i src_78a = _mm256_setr_m128i(d[7], d[6]);
+
+ s[3] = _mm256_unpacklo_epi8(src_67a, src_78a);
+ s[7] = _mm256_unpackhi_epi8(src_67a, src_78a);
+
+ res[0] = convolve_lowbd(s, coeffs);
+ res[1] = convolve_lowbd(s + 4, coeffs);
+}
+
+static inline void convolve_y_2tap_32x2_avx2(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m256i *coeffs,
+ __m256i d[2], __m256i res[4]) {
+ d[1] = _mm256_loadu_si256((__m256i *)(data + 1 * stride));
+ const __m256i s00 = _mm256_unpacklo_epi8(d[0], d[1]);
+ const __m256i s01 = _mm256_unpackhi_epi8(d[0], d[1]);
+ d[0] = _mm256_loadu_si256((__m256i *)(data + 2 * stride));
+ const __m256i s10 = _mm256_unpacklo_epi8(d[1], d[0]);
+ const __m256i s11 = _mm256_unpackhi_epi8(d[1], d[0]);
+
+ res[0] = _mm256_maddubs_epi16(s00, coeffs[0]);
+ res[1] = _mm256_maddubs_epi16(s01, coeffs[0]);
+ res[2] = _mm256_maddubs_epi16(s10, coeffs[0]);
+ res[3] = _mm256_maddubs_epi16(s11, coeffs[0]);
+}
+
+static inline void convolve_y_4tap_32x2_avx2(const uint8_t *const data,
+ const ptrdiff_t stride,
+ const __m256i coeffs[2],
+ __m256i d[4], __m256i s1[4],
+ __m256i s2[4], __m256i res[4]) {
+ d[3] = _mm256_loadu_si256((__m256i *)(data + 3 * stride));
+ s1[1] = _mm256_unpacklo_epi8(d[2], d[3]);
+ s1[3] = _mm256_unpackhi_epi8(d[2], d[3]);
+ d[2] = _mm256_loadu_si256((__m256i *)(data + 4 * stride));
+ s2[1] = _mm256_unpacklo_epi8(d[3], d[2]);
+ s2[3] = _mm256_unpackhi_epi8(d[3], d[2]);
+
+ res[0] = convolve_lowbd_4tap(s1, coeffs);
+ res[1] = convolve_lowbd_4tap(s1 + 2, coeffs);
+ res[2] = convolve_lowbd_4tap(s2, coeffs);
+ res[3] = convolve_lowbd_4tap(s2 + 2, coeffs);
+}
#endif // AOM_AOM_DSP_X86_CONVOLVE_AVX2_H_
diff --git a/av1/common/x86/convolve_avx2.c b/av1/common/x86/convolve_avx2.c
index 005d666395..209dee75e5 100644
--- a/av1/common/x86/convolve_avx2.c
+++ b/av1/common/x86/convolve_avx2.c
@@ -13,10 +13,6 @@
#include "config/av1_rtcd.h"
-#if CONFIG_SVT_AV1
-#include "third_party/SVT-AV1/convolve_avx2.h"
-#endif
-
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/x86/convolve_avx2.h"
#include "aom_dsp/x86/convolve_common_intrin.h"
@@ -25,231 +21,502 @@
static inline void av1_convolve_y_sr_general_avx2(
const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w,
int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn) {
- // right shift is F-1 because we are already dividing
- // filter co-efficients by 2
- const int right_shift_bits = (FILTER_BITS - 1);
- __m128i right_shift = _mm_cvtsi32_si128(right_shift_bits);
- __m256i right_shift_const = _mm256_set1_epi16((1 << right_shift_bits) >> 1);
-
- __m256i coeffs[6], s[12];
- __m128i d[10];
+ __m128i coeffs_128[4];
+ __m256i coeffs[6];
+ int x = 0, y = h;
int i, vert_tap = get_filter_tap(filter_params_y, subpel_y_qn);
+ assert(vert_tap == 2 || vert_tap == 4 || vert_tap == 6 || vert_tap == 8 ||
+ vert_tap == 12);
+ assert(!(w % 2));
+ assert(!(h % 2));
+
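+ // Center the filter: point src_ptr (taps / 2 - 1) rows above src so row
+ // 0 of the working pointer is the first row the vertical filter reads.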
+ const int fo_vert = vert_tap / 2 - 1;
+ const uint8_t *const src_ptr = src - fo_vert * src_stride;
+ const uint8_t *data = src_ptr;
+ uint8_t *dst_ptr = dst;
+
+ if (vert_tap == 2) {
+ if (subpel_y_qn != 8) {
+ if (w <= 4) {
+ prepare_coeffs_2t_ssse3(filter_params_y, subpel_y_qn, coeffs_128);
+ __m128i d[2], res;
+ if (w == 2) {
+ d[0] = _mm_cvtsi32_si128(loadu_int16(data));
- if (vert_tap == 6)
- prepare_coeffs_6t_lowbd(filter_params_y, subpel_y_qn, coeffs);
- else if (vert_tap == 12) {
- prepare_coeffs_12taps(filter_params_y, subpel_y_qn, coeffs);
- } else {
- prepare_coeffs_lowbd(filter_params_y, subpel_y_qn, coeffs);
- }
+ do {
+ convolve_y_2tap_2x2_ssse3(data, src_stride, coeffs_128, d, &res);
+ res = round_sr_y_ssse3(res);
+ pack_store_u8_2x2_sse2(res, dst_ptr, dst_stride);
+
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
+ } while (y > 0);
+ } else {
+ assert(w == 4);
+ d[0] = _mm_cvtsi32_si128(loadu_int32(data));
- // vert_filt as 4 tap
- if (vert_tap == 4) {
- const int fo_vert = 1;
- const uint8_t *const src_ptr = src - fo_vert * src_stride;
- for (int j = 0; j < w; j += 16) {
- const uint8_t *data = &src_ptr[j];
- d[0] = _mm_loadu_si128((__m128i *)(data + 0 * src_stride));
- d[1] = _mm_loadu_si128((__m128i *)(data + 1 * src_stride));
- d[2] = _mm_loadu_si128((__m128i *)(data + 2 * src_stride));
- d[3] = _mm_loadu_si128((__m128i *)(data + 3 * src_stride));
- d[4] = _mm_loadu_si128((__m128i *)(data + 4 * src_stride));
+ do {
+ convolve_y_2tap_4x2_ssse3(data, src_stride, coeffs_128, d, &res);
+ res = round_sr_y_ssse3(res);
+ pack_store_u8_4x2_sse2(res, dst_ptr, dst_stride);
+
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
+ } while (y > 0);
+ }
+ } else {
+ prepare_coeffs_2t_lowbd(filter_params_y, subpel_y_qn, coeffs);
- // Load lines a and b. Line a to lower 128, line b to upper 128
- const __m256i src_01a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[0]), _mm256_castsi128_si256(d[1]), 0x20);
+ if (w == 8) {
+ __m128i d[2];
+ d[0] = _mm_loadl_epi64((__m128i *)data);
- const __m256i src_12a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[1]), _mm256_castsi128_si256(d[2]), 0x20);
+ do {
+ __m256i res;
+ convolve_y_2tap_8x2_avx2(data, src_stride, coeffs, d, &res);
+ round_pack_store_y_8x2_avx2(res, dst_ptr, dst_stride);
- const __m256i src_23a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[2]), _mm256_castsi128_si256(d[3]), 0x20);
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
- const __m256i src_34a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[3]), _mm256_castsi128_si256(d[4]), 0x20);
+ } while (y > 0);
- s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);
- s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);
+ } else if (w == 16) {
+ __m128i d[2];
+ d[0] = _mm_loadu_si128((__m128i *)data);
- s[3] = _mm256_unpackhi_epi8(src_01a, src_12a);
- s[4] = _mm256_unpackhi_epi8(src_23a, src_34a);
+ do {
+ __m256i res[2];
+ convolve_y_2tap_16x2_avx2(data, src_stride, coeffs, d, res);
+ round_pack_store_y_16x2_avx2(res, dst_ptr, dst_stride);
- for (i = 0; i < h; i += 2) {
- data = &src_ptr[i * src_stride + j];
- d[5] = _mm_loadu_si128((__m128i *)(data + 5 * src_stride));
- const __m256i src_45a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[4]), _mm256_castsi128_si256(d[5]), 0x20);
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
+ } while (y > 0);
- d[4] = _mm_loadu_si128((__m128i *)(data + 6 * src_stride));
- const __m256i src_56a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[5]), _mm256_castsi128_si256(d[4]), 0x20);
+ } else {
+ assert(!(w % 32));
- s[2] = _mm256_unpacklo_epi8(src_45a, src_56a);
- s[5] = _mm256_unpackhi_epi8(src_45a, src_56a);
+ __m256i d[2];
+ do {
+ data = src_ptr + x;
+ dst_ptr = dst + x;
+ y = h;
- const __m256i res_lo = convolve_lowbd_4tap(s, coeffs + 1);
- /* rounding code */
- // shift by F - 1
- const __m256i res_16b_lo = _mm256_sra_epi16(
- _mm256_add_epi16(res_lo, right_shift_const), right_shift);
- // 8 bit conversion and saturation to uint8
- __m256i res_8b_lo = _mm256_packus_epi16(res_16b_lo, res_16b_lo);
+ d[0] = _mm256_loadu_si256((__m256i *)data);
- if (w - j > 8) {
- const __m256i res_hi = convolve_lowbd_4tap(s + 3, coeffs + 1);
+ do {
+ __m256i res[4];
+ convolve_y_2tap_32x2_avx2(data, src_stride, coeffs, d, res);
+ round_pack_store_y_32x2_avx2(res, dst_ptr, dst_stride);
- /* rounding code */
- // shift by F - 1
- const __m256i res_16b_hi = _mm256_sra_epi16(
- _mm256_add_epi16(res_hi, right_shift_const), right_shift);
- // 8 bit conversion and saturation to uint8
- __m256i res_8b_hi = _mm256_packus_epi16(res_16b_hi, res_16b_hi);
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
+ } while (y > 0);
- __m256i res_a = _mm256_unpacklo_epi64(res_8b_lo, res_8b_hi);
+ x += 32;
+ } while (x < w);
+ }
+ }
+ } else {
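+ // subpel_y_qn == 8: the bilinear taps are {64, 64}, so filtering
+ // reduces to a rounded average, (a + b + 1) >> 1, i.e. avg_epu8.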
+ if (w <= 16) {
+ __m128i s[2], res;
- const __m128i res_0 = _mm256_castsi256_si128(res_a);
- const __m128i res_1 = _mm256_extracti128_si256(res_a, 1);
+ if (w == 2) {
+ s[0] = _mm_cvtsi32_si128(loadu_int16(data));
- _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res_0);
- _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j + dst_stride],
- res_1);
+ do {
+ s[1] = _mm_cvtsi32_si128(loadu_int16(data + src_stride));
+ res = _mm_avg_epu8(s[0], s[1]);
+ *(int16_t *)dst_ptr = (int16_t)_mm_cvtsi128_si32(res);
+ s[0] = _mm_cvtsi32_si128(loadu_int16(data + 2 * src_stride));
+ res = _mm_avg_epu8(s[1], s[0]);
+ *(int16_t *)(dst_ptr + dst_stride) =
+ (int16_t)_mm_cvtsi128_si32(res);
+
+ data += 2 * src_stride;
+ dst_ptr += 2 * dst_stride;
+ y -= 2;
+ } while (y > 0);
+ } else if (w == 4) {
+ s[0] = _mm_cvtsi32_si128(loadu_int32(data));
+
+ do {
+ s[1] = _mm_cvtsi32_si128(loadu_int32(data + src_stride));
+ res = _mm_avg_epu8(s[0], s[1]);
+ xx_storel_32(dst_ptr, res);
+ s[0] = _mm_cvtsi32_si128(loadu_int32(data + 2 * src_stride));
+ res = _mm_avg_epu8(s[1], s[0]);
+ xx_storel_32(dst_ptr + dst_stride, res);
+
+ data += 2 * src_stride;
+ dst_ptr += 2 * dst_stride;
+ y -= 2;
+ } while (y > 0);
+ } else if (w == 8) {
+ s[0] = _mm_loadl_epi64((__m128i *)data);
+
+ do {
+ s[1] = _mm_loadl_epi64((__m128i *)(data + src_stride));
+ res = _mm_avg_epu8(s[0], s[1]);
+ _mm_storel_epi64((__m128i *)dst_ptr, res);
+ s[0] = _mm_loadl_epi64((__m128i *)(data + 2 * src_stride));
+ res = _mm_avg_epu8(s[1], s[0]);
+ _mm_storel_epi64((__m128i *)(dst_ptr + dst_stride), res);
+
+ data += 2 * src_stride;
+ dst_ptr += 2 * dst_stride;
+ y -= 2;
+ } while (y > 0);
} else {
- const __m128i res_0 = _mm256_castsi256_si128(res_8b_lo);
- const __m128i res_1 = _mm256_extracti128_si256(res_8b_lo, 1);
- if (w - j > 4) {
- _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j], res_0);
- _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j + dst_stride],
- res_1);
- } else if (w - j > 2) {
- xx_storel_32(&dst[i * dst_stride + j], res_0);
- xx_storel_32(&dst[i * dst_stride + j + dst_stride], res_1);
- } else {
- __m128i *const p_0 = (__m128i *)&dst[i * dst_stride + j];
- __m128i *const p_1 =
- (__m128i *)&dst[i * dst_stride + j + dst_stride];
- *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);
- *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);
- }
+ assert(w == 16);
+
+ s[0] = _mm_loadu_si128((__m128i *)data);
+
+ do {
+ s[1] = _mm_loadu_si128((__m128i *)(data + src_stride));
+ res = _mm_avg_epu8(s[0], s[1]);
+ _mm_storeu_si128((__m128i *)dst_ptr, res);
+ s[0] = _mm_loadu_si128((__m128i *)(data + 2 * src_stride));
+ res = _mm_avg_epu8(s[1], s[0]);
+ _mm_storeu_si128((__m128i *)(dst_ptr + dst_stride), res);
+
+ data += 2 * src_stride;
+ dst_ptr += 2 * dst_stride;
+ y -= 2;
+ } while (y > 0);
}
- s[0] = s[1];
- s[1] = s[2];
+ } else {
+ assert(!(w % 32));
- s[3] = s[4];
- s[4] = s[5];
+ __m256i s[2], res;
+ do {
+ data = src_ptr + x;
+ dst_ptr = dst + x;
+ y = h;
+
+ s[0] = _mm256_loadu_si256((__m256i *)data);
+
+ do {
+ s[1] = _mm256_loadu_si256((__m256i *)(data + src_stride));
+ res = _mm256_avg_epu8(s[0], s[1]);
+ _mm256_storeu_si256((__m256i *)dst_ptr, res);
+ s[0] = _mm256_loadu_si256((__m256i *)(data + 2 * src_stride));
+ res = _mm256_avg_epu8(s[1], s[0]);
+ _mm256_storeu_si256((__m256i *)(dst_ptr + dst_stride), res);
+
+ data += 2 * src_stride;
+ dst_ptr += 2 * dst_stride;
+ y -= 2;
+ } while (y > 0);
+
+ x += 32;
+ } while (x < w);
+ }
+ }
+ } else if (vert_tap == 4) {
+ if (w <= 4) {
+ prepare_coeffs_4t_ssse3(filter_params_y, subpel_y_qn, coeffs_128);
+ __m128i d[4], s[2];
+
+ if (w == 2) {
+ d[0] = _mm_cvtsi32_si128(loadu_int16(data + 0 * src_stride));
+ d[1] = _mm_cvtsi32_si128(loadu_int16(data + 1 * src_stride));
+ d[2] = _mm_cvtsi32_si128(loadu_int16(data + 2 * src_stride));
+
+ const __m128i src_01a = _mm_unpacklo_epi16(d[0], d[1]);
+ const __m128i src_12a = _mm_unpacklo_epi16(d[1], d[2]);
+
+ s[0] = _mm_unpacklo_epi8(src_01a, src_12a);
+ do {
+ __m128i res;
+ convolve_y_4tap_2x2_ssse3(data, src_stride, coeffs_128, d, s, &res);
+ res = round_sr_y_ssse3(res);
+ pack_store_u8_2x2_sse2(res, dst_ptr, dst_stride);
+
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
+
+ s[0] = s[1];
+ } while (y > 0);
+
+ } else {
+ assert(w == 4);
+
+ d[0] = _mm_cvtsi32_si128(loadu_int32(data + 0 * src_stride));
+ d[1] = _mm_cvtsi32_si128(loadu_int32(data + 1 * src_stride));
+ d[2] = _mm_cvtsi32_si128(loadu_int32(data + 2 * src_stride));
+
+ const __m128i src_01a = _mm_unpacklo_epi32(d[0], d[1]);
+ const __m128i src_12a = _mm_unpacklo_epi32(d[1], d[2]);
+
+ s[0] = _mm_unpacklo_epi8(src_01a, src_12a);
+ do {
+ __m128i res;
+ convolve_y_4tap_4x2_ssse3(data, src_stride, coeffs_128, d, s, &res);
+ res = round_sr_y_ssse3(res);
+ pack_store_u8_4x2_sse2(res, dst_ptr, dst_stride);
+
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
+
+ s[0] = s[1];
+ } while (y > 0);
+ }
+ } else {
+ prepare_coeffs_4t_lowbd(filter_params_y, subpel_y_qn, coeffs);
+
+ if (w == 8) {
+ __m128i d[4];
+ __m256i s[2];
+
+ d[0] = _mm_loadl_epi64((__m128i *)(data + 0 * src_stride));
+ d[1] = _mm_loadl_epi64((__m128i *)(data + 1 * src_stride));
+ d[2] = _mm_loadl_epi64((__m128i *)(data + 2 * src_stride));
+
+ const __m256i src_01a = _mm256_setr_m128i(d[0], d[1]);
+ const __m256i src_12a = _mm256_setr_m128i(d[1], d[2]);
+
+ s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);
+ do {
+ __m256i res;
+ convolve_y_4tap_8x2_avx2(data, src_stride, coeffs, d, s, &res);
+ round_pack_store_y_8x2_avx2(res, dst_ptr, dst_stride);
+
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
+
+ s[0] = s[1];
+ } while (y > 0);
+ } else if (w == 16) {
+ __m128i d[4];
+ __m256i s[4];
+
+ d[0] = _mm_loadu_si128((__m128i *)(data + 0 * src_stride));
+ d[1] = _mm_loadu_si128((__m128i *)(data + 1 * src_stride));
+ d[2] = _mm_loadu_si128((__m128i *)(data + 2 * src_stride));
+
+ const __m256i src_01a = _mm256_setr_m128i(d[0], d[1]);
+ const __m256i src_12a = _mm256_setr_m128i(d[1], d[2]);
+
+ s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);
+ s[2] = _mm256_unpackhi_epi8(src_01a, src_12a);
+
+ do {
+ __m256i res[2];
+ convolve_y_4tap_16x2_avx2(data, src_stride, coeffs, d, s, res);
+ round_pack_store_y_16x2_avx2(res, dst_ptr, dst_stride);
+
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
+
+ s[0] = s[1];
+ s[2] = s[3];
+ } while (y > 0);
+ } else {
+ assert(!(w % 32));
+
+ __m256i d[4], s1[4], s2[4];
+ do {
+ data = src_ptr + x;
+ dst_ptr = dst + x;
+ y = h;
+
+ d[0] = _mm256_loadu_si256((__m256i *)(data + 0 * src_stride));
+ d[1] = _mm256_loadu_si256((__m256i *)(data + 1 * src_stride));
+ d[2] = _mm256_loadu_si256((__m256i *)(data + 2 * src_stride));
+
+ s1[0] = _mm256_unpacklo_epi8(d[0], d[1]);
+ s1[2] = _mm256_unpackhi_epi8(d[0], d[1]);
+
+ s2[0] = _mm256_unpacklo_epi8(d[1], d[2]);
+ s2[2] = _mm256_unpackhi_epi8(d[1], d[2]);
+
+ do {
+ __m256i res[4];
+ convolve_y_4tap_32x2_avx2(data, src_stride, coeffs, d, s1, s2, res);
+ round_pack_store_y_32x2_avx2(res, dst_ptr, dst_stride);
+
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
+
+ s1[0] = s1[1];
+ s1[2] = s1[3];
+
+ s2[0] = s2[1];
+ s2[2] = s2[3];
+ } while (y > 0);
+
+ x += 32;
+ } while (x < w);
}
}
} else if (vert_tap == 6) {
- const int fo_vert = vert_tap / 2 - 1;
- const uint8_t *const src_ptr = src - fo_vert * src_stride;
+ if (w <= 4) {
+ prepare_coeffs_6t_ssse3(filter_params_y, subpel_y_qn, coeffs_128);
- for (int j = 0; j < w; j += 16) {
- const uint8_t *data = &src_ptr[j];
- __m256i src6;
+ __m128i d[6], s[3];
+ if (w == 2) {
+ d[0] = _mm_cvtsi32_si128(loadu_int16(data + 0 * src_stride));
+ d[1] = _mm_cvtsi32_si128(loadu_int16(data + 1 * src_stride));
+ d[2] = _mm_cvtsi32_si128(loadu_int16(data + 2 * src_stride));
+ d[3] = _mm_cvtsi32_si128(loadu_int16(data + 3 * src_stride));
+ d[4] = _mm_cvtsi32_si128(loadu_int16(data + 4 * src_stride));
- d[0] = _mm_loadu_si128((__m128i *)(data + 0 * src_stride));
- d[1] = _mm_loadu_si128((__m128i *)(data + 1 * src_stride));
- d[2] = _mm_loadu_si128((__m128i *)(data + 2 * src_stride));
- d[3] = _mm_loadu_si128((__m128i *)(data + 3 * src_stride));
- // Load lines a and b. Line a to lower 128, line b to upper 128
- const __m256i src_01a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[0]), _mm256_castsi128_si256(d[1]), 0x20);
+ const __m128i src_01a = _mm_unpacklo_epi16(d[0], d[1]);
+ const __m128i src_12a = _mm_unpacklo_epi16(d[1], d[2]);
+ const __m128i src_23a = _mm_unpacklo_epi16(d[2], d[3]);
+ const __m128i src_34a = _mm_unpacklo_epi16(d[3], d[4]);
- const __m256i src_12a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[1]), _mm256_castsi128_si256(d[2]), 0x20);
+ s[0] = _mm_unpacklo_epi8(src_01a, src_12a);
+ s[1] = _mm_unpacklo_epi8(src_23a, src_34a);
- const __m256i src_23a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[2]), _mm256_castsi128_si256(d[3]), 0x20);
+ do {
+ __m128i res;
+ convolve_y_6tap_2x2_ssse3(data, src_stride, coeffs_128, d, s, &res);
+ res = round_sr_y_ssse3(res);
+ pack_store_u8_2x2_sse2(res, dst_ptr, dst_stride);
- src6 = _mm256_castsi128_si256(
- _mm_loadu_si128((__m128i *)(data + 4 * src_stride)));
- const __m256i src_34a =
- _mm256_permute2x128_si256(_mm256_castsi128_si256(d[3]), src6, 0x20);
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
- s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);
- s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);
+ s[0] = s[1];
+ s[1] = s[2];
+ } while (y > 0);
- s[3] = _mm256_unpackhi_epi8(src_01a, src_12a);
- s[4] = _mm256_unpackhi_epi8(src_23a, src_34a);
+ } else {
+ assert(w == 4);
+ d[0] = _mm_cvtsi32_si128(loadu_int32(data + 0 * src_stride));
+ d[1] = _mm_cvtsi32_si128(loadu_int32(data + 1 * src_stride));
+ d[2] = _mm_cvtsi32_si128(loadu_int32(data + 2 * src_stride));
+ d[3] = _mm_cvtsi32_si128(loadu_int32(data + 3 * src_stride));
+ d[4] = _mm_cvtsi32_si128(loadu_int32(data + 4 * src_stride));
- for (i = 0; i < h; i += 2) {
- data = &src_ptr[i * src_stride + j];
- const __m256i src_45a = _mm256_permute2x128_si256(
- src6,
- _mm256_castsi128_si256(
- _mm_loadu_si128((__m128i *)(data + 5 * src_stride))),
- 0x20);
+ const __m128i src_01a = _mm_unpacklo_epi32(d[0], d[1]);
+ const __m128i src_12a = _mm_unpacklo_epi32(d[1], d[2]);
+ const __m128i src_23a = _mm_unpacklo_epi32(d[2], d[3]);
+ const __m128i src_34a = _mm_unpacklo_epi32(d[3], d[4]);
- src6 = _mm256_castsi128_si256(
- _mm_loadu_si128((__m128i *)(data + 6 * src_stride)));
- const __m256i src_56a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(
- _mm_loadu_si128((__m128i *)(data + 5 * src_stride))),
- src6, 0x20);
+ s[0] = _mm_unpacklo_epi8(src_01a, src_12a);
+ s[1] = _mm_unpacklo_epi8(src_23a, src_34a);
- s[2] = _mm256_unpacklo_epi8(src_45a, src_56a);
- s[5] = _mm256_unpackhi_epi8(src_45a, src_56a);
+ do {
+ __m128i res;
+ convolve_y_6tap_4x2_ssse3(data, src_stride, coeffs_128, d, s, &res);
+ res = round_sr_y_ssse3(res);
+ pack_store_u8_4x2_sse2(res, dst_ptr, dst_stride);
+
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
+
+ s[0] = s[1];
+ s[1] = s[2];
+ } while (y > 0);
+ }
+ } else {
+ prepare_coeffs_6t_lowbd(filter_params_y, subpel_y_qn, coeffs);
- const __m256i res_lo = convolve_lowbd_6tap(s, coeffs);
+ if (w == 8) {
+ __m128i d[6];
+ __m256i s[3];
- /* rounding code */
- // shift by F - 1
- const __m256i res_16b_lo = _mm256_sra_epi16(
- _mm256_add_epi16(res_lo, right_shift_const), right_shift);
- // 8 bit conversion and saturation to uint8
- __m256i res_8b_lo = _mm256_packus_epi16(res_16b_lo, res_16b_lo);
+ d[0] = _mm_loadl_epi64((__m128i *)(data + 0 * src_stride));
+ d[1] = _mm_loadl_epi64((__m128i *)(data + 1 * src_stride));
+ d[2] = _mm_loadl_epi64((__m128i *)(data + 2 * src_stride));
+ d[3] = _mm_loadl_epi64((__m128i *)(data + 3 * src_stride));
+ d[4] = _mm_loadl_epi64((__m128i *)(data + 4 * src_stride));
- if (w - j > 8) {
- const __m256i res_hi = convolve_lowbd_6tap(s + 3, coeffs);
+ const __m256i src_01a = _mm256_setr_m128i(d[0], d[1]);
+ const __m256i src_12a = _mm256_setr_m128i(d[1], d[2]);
+ const __m256i src_23a = _mm256_setr_m128i(d[2], d[3]);
+ const __m256i src_34a = _mm256_setr_m128i(d[3], d[4]);
- /* rounding code */
- // shift by F - 1
- const __m256i res_16b_hi = _mm256_sra_epi16(
- _mm256_add_epi16(res_hi, right_shift_const), right_shift);
- // 8 bit conversion and saturation to uint8
- __m256i res_8b_hi = _mm256_packus_epi16(res_16b_hi, res_16b_hi);
+ s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);
+ s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);
- __m256i res_a = _mm256_unpacklo_epi64(res_8b_lo, res_8b_hi);
+ do {
+ __m256i res;
+ convolve_y_6tap_8x2_avx2(data, src_stride, coeffs, d, s, &res);
+ round_pack_store_y_8x2_avx2(res, dst_ptr, dst_stride);
- const __m128i res_0 = _mm256_castsi256_si128(res_a);
- const __m128i res_1 = _mm256_extracti128_si256(res_a, 1);
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
- _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res_0);
- _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j + dst_stride],
- res_1);
- } else {
- const __m128i res_0 = _mm256_castsi256_si128(res_8b_lo);
- const __m128i res_1 = _mm256_extracti128_si256(res_8b_lo, 1);
- if (w - j > 4) {
- _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j], res_0);
- _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j + dst_stride],
- res_1);
- } else if (w - j > 2) {
- xx_storel_32(&dst[i * dst_stride + j], res_0);
- xx_storel_32(&dst[i * dst_stride + j + dst_stride], res_1);
- } else {
- __m128i *const p_0 = (__m128i *)&dst[i * dst_stride + j];
- __m128i *const p_1 =
- (__m128i *)&dst[i * dst_stride + j + dst_stride];
- *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);
- *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);
- }
- }
- s[0] = s[1];
- s[1] = s[2];
- s[3] = s[4];
- s[4] = s[5];
+ s[0] = s[1];
+ s[1] = s[2];
+ } while (y > 0);
+
+ } else {
+ assert(!(w % 16));
+
+ __m128i d[6];
+ __m256i s[6];
+ do {
+ data = src_ptr + x;
+ dst_ptr = dst + x;
+ y = h;
+
+ d[0] = _mm_loadu_si128((__m128i *)(data + 0 * src_stride));
+ d[1] = _mm_loadu_si128((__m128i *)(data + 1 * src_stride));
+ d[2] = _mm_loadu_si128((__m128i *)(data + 2 * src_stride));
+ d[3] = _mm_loadu_si128((__m128i *)(data + 3 * src_stride));
+ d[4] = _mm_loadu_si128((__m128i *)(data + 4 * src_stride));
+
+ const __m256i src_01a = _mm256_setr_m128i(d[0], d[1]);
+ const __m256i src_12a = _mm256_setr_m128i(d[1], d[2]);
+ const __m256i src_23a = _mm256_setr_m128i(d[2], d[3]);
+ const __m256i src_34a = _mm256_setr_m128i(d[3], d[4]);
+
+ s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);
+ s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);
+
+ s[3] = _mm256_unpackhi_epi8(src_01a, src_12a);
+ s[4] = _mm256_unpackhi_epi8(src_23a, src_34a);
+
+ do {
+ __m256i res[2];
+ convolve_y_6tap_16x2_avx2(data, src_stride, coeffs, d, s, res);
+ round_pack_store_y_16x2_avx2(res, dst_ptr, dst_stride);
+
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
+
+ s[0] = s[1];
+ s[1] = s[2];
+
+ s[3] = s[4];
+ s[4] = s[5];
+ } while (y > 0);
+
+ x += 16;
+ } while (x < w);
}
}
} else if (vert_tap == 12) { // vert_tap == 12
- const int fo_vert = filter_params_y->taps / 2 - 1;
- const uint8_t *const src_ptr = src - fo_vert * src_stride;
+ __m128i d[12];
+ __m256i s[12];
+ prepare_coeffs_12taps(filter_params_y, subpel_y_qn, coeffs);
const __m256i v_zero = _mm256_setzero_si256();
- right_shift = _mm_cvtsi32_si128(FILTER_BITS);
- right_shift_const = _mm256_set1_epi32((1 << FILTER_BITS) >> 1);
+ __m128i right_shift = _mm_cvtsi32_si128(FILTER_BITS);
+ __m256i right_shift_const = _mm256_set1_epi32((1 << FILTER_BITS) >> 1);
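+ // Unlike the halved-tap 2/4/6/8-tap paths above, the 12-tap path widens
+ // to 32 bits and keeps full-precision taps, hence the full FILTER_BITS
+ // rounding shift here.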
for (int j = 0; j < w; j += 8) {
- const uint8_t *data = &src_ptr[j];
+ data = &src_ptr[j];
__m256i src10;
d[0] = _mm_loadl_epi64((__m128i *)(data + 0 * src_stride));
@@ -393,118 +660,175 @@ static inline void av1_convolve_y_sr_general_avx2(
}
}
} else {
- const int fo_vert = filter_params_y->taps / 2 - 1;
- const uint8_t *const src_ptr = src - fo_vert * src_stride;
-
- for (int j = 0; j < w; j += 16) {
- const uint8_t *data = &src_ptr[j];
- __m256i src6;
-
- d[0] = _mm_loadu_si128((__m128i *)(data + 0 * src_stride));
- d[1] = _mm_loadu_si128((__m128i *)(data + 1 * src_stride));
- d[2] = _mm_loadu_si128((__m128i *)(data + 2 * src_stride));
- d[3] = _mm_loadu_si128((__m128i *)(data + 3 * src_stride));
- d[4] = _mm_loadu_si128((__m128i *)(data + 4 * src_stride));
- d[5] = _mm_loadu_si128((__m128i *)(data + 5 * src_stride));
- // Load lines a and b. Line a to lower 128, line b to upper 128
- const __m256i src_01a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[0]), _mm256_castsi128_si256(d[1]), 0x20);
+ assert(vert_tap == 8);
- const __m256i src_12a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[1]), _mm256_castsi128_si256(d[2]), 0x20);
-
- const __m256i src_23a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[2]), _mm256_castsi128_si256(d[3]), 0x20);
+ if (w <= 4) {
+ prepare_coeffs_ssse3(filter_params_y, subpel_y_qn, coeffs_128);
- const __m256i src_34a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[3]), _mm256_castsi128_si256(d[4]), 0x20);
+ __m128i d[8], s[4], res;
+ if (w == 2) {
+ d[0] = _mm_cvtsi32_si128(loadu_int16(data + 0 * src_stride));
+ d[1] = _mm_cvtsi32_si128(loadu_int16(data + 1 * src_stride));
+ d[2] = _mm_cvtsi32_si128(loadu_int16(data + 2 * src_stride));
+ d[3] = _mm_cvtsi32_si128(loadu_int16(data + 3 * src_stride));
+ d[4] = _mm_cvtsi32_si128(loadu_int16(data + 4 * src_stride));
+ d[5] = _mm_cvtsi32_si128(loadu_int16(data + 5 * src_stride));
+ d[6] = _mm_cvtsi32_si128(loadu_int16(data + 6 * src_stride));
+
+ const __m128i src_01a = _mm_unpacklo_epi16(d[0], d[1]);
+ const __m128i src_12a = _mm_unpacklo_epi16(d[1], d[2]);
+ const __m128i src_23a = _mm_unpacklo_epi16(d[2], d[3]);
+ const __m128i src_34a = _mm_unpacklo_epi16(d[3], d[4]);
+ const __m128i src_45a = _mm_unpacklo_epi16(d[4], d[5]);
+ const __m128i src_56a = _mm_unpacklo_epi16(d[5], d[6]);
+
+ s[0] = _mm_unpacklo_epi8(src_01a, src_12a);
+ s[1] = _mm_unpacklo_epi8(src_23a, src_34a);
+ s[2] = _mm_unpacklo_epi8(src_45a, src_56a);
- const __m256i src_45a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(d[4]), _mm256_castsi128_si256(d[5]), 0x20);
+ do {
+ convolve_y_8tap_2x2_ssse3(data, src_stride, coeffs_128, d, s, &res);
+ res = round_sr_y_ssse3(res);
+ pack_store_u8_2x2_sse2(res, dst_ptr, dst_stride);
- src6 = _mm256_castsi128_si256(
- _mm_loadu_si128((__m128i *)(data + 6 * src_stride)));
- const __m256i src_56a =
- _mm256_permute2x128_si256(_mm256_castsi128_si256(d[5]), src6, 0x20);
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
- s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);
- s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);
- s[2] = _mm256_unpacklo_epi8(src_45a, src_56a);
+ s[0] = s[1];
+ s[1] = s[2];
+ s[2] = s[3];
+ } while (y > 0);
- s[4] = _mm256_unpackhi_epi8(src_01a, src_12a);
- s[5] = _mm256_unpackhi_epi8(src_23a, src_34a);
- s[6] = _mm256_unpackhi_epi8(src_45a, src_56a);
+ } else {
+ assert(w == 4);
+
+ d[0] = _mm_cvtsi32_si128(loadu_int32(data + 0 * src_stride));
+ d[1] = _mm_cvtsi32_si128(loadu_int32(data + 1 * src_stride));
+ d[2] = _mm_cvtsi32_si128(loadu_int32(data + 2 * src_stride));
+ d[3] = _mm_cvtsi32_si128(loadu_int32(data + 3 * src_stride));
+ d[4] = _mm_cvtsi32_si128(loadu_int32(data + 4 * src_stride));
+ d[5] = _mm_cvtsi32_si128(loadu_int32(data + 5 * src_stride));
+ d[6] = _mm_cvtsi32_si128(loadu_int32(data + 6 * src_stride));
+
+ const __m128i src_01a = _mm_unpacklo_epi32(d[0], d[1]);
+ const __m128i src_12a = _mm_unpacklo_epi32(d[1], d[2]);
+ const __m128i src_23a = _mm_unpacklo_epi32(d[2], d[3]);
+ const __m128i src_34a = _mm_unpacklo_epi32(d[3], d[4]);
+ const __m128i src_45a = _mm_unpacklo_epi32(d[4], d[5]);
+ const __m128i src_56a = _mm_unpacklo_epi32(d[5], d[6]);
+
+ s[0] = _mm_unpacklo_epi8(src_01a, src_12a);
+ s[1] = _mm_unpacklo_epi8(src_23a, src_34a);
+ s[2] = _mm_unpacklo_epi8(src_45a, src_56a);
- for (i = 0; i < h; i += 2) {
- data = &src_ptr[i * src_stride + j];
- const __m256i src_67a = _mm256_permute2x128_si256(
- src6,
- _mm256_castsi128_si256(
- _mm_loadu_si128((__m128i *)(data + 7 * src_stride))),
- 0x20);
+ do {
+ convolve_y_8tap_4x2_ssse3(data, src_stride, coeffs_128, d, s, &res);
+ res = round_sr_y_ssse3(res);
+ pack_store_u8_4x2_sse2(res, dst_ptr, dst_stride);
+
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
+
+ s[0] = s[1];
+ s[1] = s[2];
+ s[2] = s[3];
+ } while (y > 0);
+ }
+ } else {
+ prepare_coeffs_lowbd(filter_params_y, subpel_y_qn, coeffs);
+
+ if (w == 8) {
+ __m128i d[8];
+ __m256i s[4];
+
+ d[0] = _mm_loadl_epi64((__m128i *)(data + 0 * src_stride));
+ d[1] = _mm_loadl_epi64((__m128i *)(data + 1 * src_stride));
+ d[2] = _mm_loadl_epi64((__m128i *)(data + 2 * src_stride));
+ d[3] = _mm_loadl_epi64((__m128i *)(data + 3 * src_stride));
+ d[4] = _mm_loadl_epi64((__m128i *)(data + 4 * src_stride));
+ d[5] = _mm_loadl_epi64((__m128i *)(data + 5 * src_stride));
+ d[6] = _mm_loadl_epi64((__m128i *)(data + 6 * src_stride));
+
+ const __m256i src_01a = _mm256_setr_m128i(d[0], d[1]);
+ const __m256i src_12a = _mm256_setr_m128i(d[1], d[2]);
+ const __m256i src_23a = _mm256_setr_m128i(d[2], d[3]);
+ const __m256i src_34a = _mm256_setr_m128i(d[3], d[4]);
+ const __m256i src_45a = _mm256_setr_m128i(d[4], d[5]);
+ const __m256i src_56a = _mm256_setr_m128i(d[5], d[6]);
+
+ s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);
+ s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);
+ s[2] = _mm256_unpacklo_epi8(src_45a, src_56a);
- src6 = _mm256_castsi128_si256(
- _mm_loadu_si128((__m128i *)(data + 8 * src_stride)));
- const __m256i src_78a = _mm256_permute2x128_si256(
- _mm256_castsi128_si256(
- _mm_loadu_si128((__m128i *)(data + 7 * src_stride))),
- src6, 0x20);
+ do {
+ __m256i res;
+ convolve_y_8tap_8x2_avx2(data, src_stride, coeffs, d, s, &res);
+ round_pack_store_y_8x2_avx2(res, dst_ptr, dst_stride);
- s[3] = _mm256_unpacklo_epi8(src_67a, src_78a);
- s[7] = _mm256_unpackhi_epi8(src_67a, src_78a);
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
- const __m256i res_lo = convolve_lowbd(s, coeffs);
+ s[0] = s[1];
+ s[1] = s[2];
+ s[2] = s[3];
+ } while (y > 0);
- /* rounding code */
- // shift by F - 1
- const __m256i res_16b_lo = _mm256_sra_epi16(
- _mm256_add_epi16(res_lo, right_shift_const), right_shift);
- // 8 bit conversion and saturation to uint8
- __m256i res_8b_lo = _mm256_packus_epi16(res_16b_lo, res_16b_lo);
+ } else {
+ assert(!(w % 16));
- if (w - j > 8) {
- const __m256i res_hi = convolve_lowbd(s + 4, coeffs);
+ __m128i d[8];
+ __m256i s[8];
+ do {
+ data = src_ptr + x;
+ dst_ptr = dst + x;
+ y = h;
+
+ d[0] = _mm_loadu_si128((__m128i *)(data + 0 * src_stride));
+ d[1] = _mm_loadu_si128((__m128i *)(data + 1 * src_stride));
+ d[2] = _mm_loadu_si128((__m128i *)(data + 2 * src_stride));
+ d[3] = _mm_loadu_si128((__m128i *)(data + 3 * src_stride));
+ d[4] = _mm_loadu_si128((__m128i *)(data + 4 * src_stride));
+ d[5] = _mm_loadu_si128((__m128i *)(data + 5 * src_stride));
+ d[6] = _mm_loadu_si128((__m128i *)(data + 6 * src_stride));
+
+ const __m256i src_01a = _mm256_setr_m128i(d[0], d[1]);
+ const __m256i src_12a = _mm256_setr_m128i(d[1], d[2]);
+ const __m256i src_23a = _mm256_setr_m128i(d[2], d[3]);
+ const __m256i src_34a = _mm256_setr_m128i(d[3], d[4]);
+ const __m256i src_45a = _mm256_setr_m128i(d[4], d[5]);
+ const __m256i src_56a = _mm256_setr_m128i(d[5], d[6]);
+
+ s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);
+ s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);
+ s[2] = _mm256_unpacklo_epi8(src_45a, src_56a);
+
+ s[4] = _mm256_unpackhi_epi8(src_01a, src_12a);
+ s[5] = _mm256_unpackhi_epi8(src_23a, src_34a);
+ s[6] = _mm256_unpackhi_epi8(src_45a, src_56a);
- /* rounding code */
- // shift by F - 1
- const __m256i res_16b_hi = _mm256_sra_epi16(
- _mm256_add_epi16(res_hi, right_shift_const), right_shift);
- // 8 bit conversion and saturation to uint8
- __m256i res_8b_hi = _mm256_packus_epi16(res_16b_hi, res_16b_hi);
+ do {
+ __m256i res[2];
+ convolve_y_8tap_16x2_avx2(data, src_stride, coeffs, d, s, res);
+ round_pack_store_y_16x2_avx2(res, dst_ptr, dst_stride);
- __m256i res_a = _mm256_unpacklo_epi64(res_8b_lo, res_8b_hi);
+ dst_ptr += 2 * dst_stride;
+ data += 2 * src_stride;
+ y -= 2;
- const __m128i res_0 = _mm256_castsi256_si128(res_a);
- const __m128i res_1 = _mm256_extracti128_si256(res_a, 1);
+ s[0] = s[1];
+ s[1] = s[2];
+ s[2] = s[3];
- _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res_0);
- _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j + dst_stride],
- res_1);
- } else {
- const __m128i res_0 = _mm256_castsi256_si128(res_8b_lo);
- const __m128i res_1 = _mm256_extracti128_si256(res_8b_lo, 1);
- if (w - j > 4) {
- _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j], res_0);
- _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j + dst_stride],
- res_1);
- } else if (w - j > 2) {
- xx_storel_32(&dst[i * dst_stride + j], res_0);
- xx_storel_32(&dst[i * dst_stride + j + dst_stride], res_1);
- } else {
- __m128i *const p_0 = (__m128i *)&dst[i * dst_stride + j];
- __m128i *const p_1 =
- (__m128i *)&dst[i * dst_stride + j + dst_stride];
- *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);
- *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);
- }
- }
- s[0] = s[1];
- s[1] = s[2];
- s[2] = s[3];
+ s[4] = s[5];
+ s[5] = s[6];
+ s[6] = s[7];
+ } while (y > 0);
- s[4] = s[5];
- s[5] = s[6];
- s[6] = s[7];
+ x += 16;
+ } while (x < w);
}
}
}
@@ -515,20 +839,8 @@ void av1_convolve_y_sr_avx2(const uint8_t *src, int32_t src_stride,
int32_t h,
const InterpFilterParams *filter_params_y,
const int32_t subpel_y_qn) {
-#if CONFIG_SVT_AV1
- const int vert_tap = get_filter_tap(filter_params_y, subpel_y_qn);
-
- if (vert_tap == 12) {
- av1_convolve_y_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
- filter_params_y, subpel_y_qn);
- } else {
- av1_convolve_y_sr_specialized_avx2(src, src_stride, dst, dst_stride, w, h,
- filter_params_y, subpel_y_qn);
- }
-#else
av1_convolve_y_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
filter_params_y, subpel_y_qn);
-#endif
}
static inline void av1_convolve_x_sr_general_avx2(
@@ -562,7 +874,7 @@ static inline void av1_convolve_x_sr_general_avx2(
const __m128i res =
convolve_x_4tap_2x2_ssse3(src_ptr, src_stride, coeffs_128);
const __m128i reg = round_sr_x_ssse3(res);
- pack_store_x_2x2_sse2(reg, dst, dst_stride);
+ pack_store_u8_2x2_sse2(reg, dst, dst_stride);
src_ptr += 2 * src_stride;
dst += 2 * dst_stride;
h -= 2;
@@ -573,7 +885,7 @@ static inline void av1_convolve_x_sr_general_avx2(
const __m128i reg =
convolve_x_4tap_4x2_ssse3(src_ptr, src_stride, coeffs_128);
const __m128i res = round_sr_x_ssse3(reg);
- pack_store_x_4x2_sse2(res, dst, dst_stride);
+ pack_store_u8_4x2_sse2(res, dst, dst_stride);
src_ptr += 2 * src_stride;
dst += 2 * dst_stride;
h -= 2;
@@ -920,7 +1232,7 @@ static inline void av1_convolve_x_sr_general_avx2(
const __m128i data =
convolve_x_2tap_2x2_ssse3(src_ptr, src_stride, coeffs_128);
const __m128i reg = round_sr_x_ssse3(data);
- pack_store_x_2x2_sse2(reg, dst, dst_stride);
+ pack_store_u8_2x2_sse2(reg, dst, dst_stride);
src_ptr += 2 * src_stride;
dst += 2 * dst_stride;
h -= 2;
@@ -930,7 +1242,7 @@ static inline void av1_convolve_x_sr_general_avx2(
const __m128i data =
convolve_x_2tap_4x2_ssse3(src_ptr, src_stride, coeffs_128);
const __m128i reg = round_sr_x_ssse3(data);
- pack_store_x_4x2_sse2(reg, dst, dst_stride);
+ pack_store_u8_4x2_sse2(reg, dst, dst_stride);
src_ptr += 2 * src_stride;
dst += 2 * dst_stride;
h -= 2;