/*
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 * Contributed by Hecai Yuan <[email protected]>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* LSX intrinsic helpers (DUP4_ARG2 etc.) and the ff_vp8_*_lsx prototypes */
#include "vp8dsp_loongarch.h"
#include "libavutil/loongarch/loongson_intrinsics.h"
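
/* Macroblock loop filter across a horizontal edge of a 16-pixel-wide block:
 * load the four rows above and the four rows below the edge, build the
 * filter mask and high-edge-variance flag, apply the VP8 macroblock filter
 * and store the six rows (p2..q2) the filter may modify. */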
void ff_vp8_v_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride, int b_limit_in,
                                int limit_in, int thresh_in)
{
    __m128i p3, p2, p1, p0, q3, q2, q1, q0;
    __m128i mask, hev, flat, thresh, limit, b_limit;
    ptrdiff_t stride2 = stride << 1;
    ptrdiff_t stride3 = stride2 + stride;
    ptrdiff_t stride4 = stride2 << 1;

    b_limit = __lsx_vreplgr2vr_b(b_limit_in);
    limit   = __lsx_vreplgr2vr_b(limit_in);
    thresh  = __lsx_vreplgr2vr_b(thresh_in);

    /* load vector elements */
    DUP4_ARG2(__lsx_vld, dst - stride4, 0, dst - stride3, 0, dst - stride2, 0,
              dst - stride, 0, p3, p2, p1, p0);
    DUP4_ARG2(__lsx_vld, dst, 0, dst + stride, 0, dst + stride2, 0,
              dst + stride3, 0, q0, q1, q2, q3);

    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                 hev, mask, flat);
    VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);

    /* store vector elements */
    __lsx_vst(p2, dst - stride3, 0);
    __lsx_vst(p1, dst - stride2, 0);
    __lsx_vst(p0, dst - stride,  0);
    __lsx_vst(q0, dst, 0);
    __lsx_vst(q1, dst + stride,  0);
    __lsx_vst(q2, dst + stride2, 0);
}
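
/* Horizontal-edge macroblock filter for the two 8-pixel-wide chroma planes:
 * u rows are packed into the low half and v rows into the high half of each
 * vector, so both planes are filtered in a single pass. */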
void ff_vp8_v_loop_filter8uv_lsx(uint8_t *dst_u, uint8_t *dst_v,
                                 ptrdiff_t stride, int b_limit_in,
                                 int limit_in, int thresh_in)
{
    __m128i p3, p2, p1, p0, q3, q2, q1, q0;
    __m128i mask, hev, flat, thresh, limit, b_limit;
    __m128i p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
    __m128i p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
    ptrdiff_t stride2 = stride << 1;
    ptrdiff_t stride3 = stride2 + stride;
    ptrdiff_t stride4 = stride2 << 1;

    b_limit = __lsx_vreplgr2vr_b(b_limit_in);
    limit   = __lsx_vreplgr2vr_b(limit_in);
    thresh  = __lsx_vreplgr2vr_b(thresh_in);

    DUP4_ARG2(__lsx_vld, dst_u - stride4, 0, dst_u - stride3, 0, dst_u - stride2, 0,
              dst_u - stride, 0, p3_u, p2_u, p1_u, p0_u);
    DUP4_ARG2(__lsx_vld, dst_u, 0, dst_u + stride, 0, dst_u + stride2, 0,
              dst_u + stride3, 0, q0_u, q1_u, q2_u, q3_u);
    DUP4_ARG2(__lsx_vld, dst_v - stride4, 0, dst_v - stride3, 0, dst_v - stride2, 0,
              dst_v - stride, 0, p3_v, p2_v, p1_v, p0_v);
    DUP4_ARG2(__lsx_vld, dst_v, 0, dst_v + stride, 0, dst_v + stride2, 0,
              dst_v + stride3, 0, q0_v, q1_v, q2_v, q3_v);

    /* the right (low) 8 elements of each packed vector are u pixels and
       the left (high) 8 elements are v pixels */
    DUP4_ARG2(__lsx_vilvl_d, p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u,
              p3, p2, p1, p0);
    DUP4_ARG2(__lsx_vilvl_d, q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u,
              q0, q1, q2, q3);

    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                 hev, mask, flat);
    VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);

    __lsx_vstelm_d(p2, dst_u - stride3, 0, 0);
    __lsx_vstelm_d(p1, dst_u - stride2, 0, 0);
    __lsx_vstelm_d(p0, dst_u - stride,  0, 0);
    __lsx_vstelm_d(q0, dst_u, 0, 0);
    __lsx_vstelm_d(q1, dst_u + stride,  0, 0);
    __lsx_vstelm_d(q2, dst_u + stride2, 0, 0);

    __lsx_vstelm_d(p2, dst_v - stride3, 0, 1);
    __lsx_vstelm_d(p1, dst_v - stride2, 0, 1);
    __lsx_vstelm_d(p0, dst_v - stride,  0, 1);
    __lsx_vstelm_d(q0, dst_v, 0, 1);
    __lsx_vstelm_d(q1, dst_v + stride,  0, 1);
    __lsx_vstelm_d(q2, dst_v + stride2, 0, 1);
}
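
/* Macroblock loop filter across a vertical edge: 16 rows of 8 pixels
 * straddling the edge are loaded and transposed so that the columns p3..q3
 * become vectors, filtered, then transposed back and stored 6 bytes per row. */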
void ff_vp8_h_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride, int b_limit_in,
                                int limit_in, int thresh_in)
{
    uint8_t *temp_src;
    __m128i p3, p2, p1, p0, q3, q2, q1, q0;
    __m128i mask, hev, flat, thresh, limit, b_limit;
    __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8;
    __m128i row9, row10, row11, row12, row13, row14, row15;
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    ptrdiff_t stride2 = stride << 1;
    ptrdiff_t stride3 = stride2 + stride;
    ptrdiff_t stride4 = stride2 << 1;

    b_limit = __lsx_vreplgr2vr_b(b_limit_in);
    limit   = __lsx_vreplgr2vr_b(limit_in);
    thresh  = __lsx_vreplgr2vr_b(thresh_in);

    temp_src = dst - 4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row0, row1, row2, row3);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row4, row5, row6, row7);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row8, row9, row10, row11);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row12, row13, row14, row15);

    LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7,
                        row8, row9, row10, row11, row12, row13, row14, row15,
                        p3, p2, p1, p0, q0, q1, q2, q3);
    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                 hev, mask, flat);
    VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);

    tmp0 = __lsx_vilvl_b(p1, p2);
    tmp1 = __lsx_vilvl_b(q0, p0);
    tmp3 = __lsx_vilvl_h(tmp1, tmp0);
    tmp4 = __lsx_vilvh_h(tmp1, tmp0);
    tmp0 = __lsx_vilvh_b(p1, p2);
    tmp1 = __lsx_vilvh_b(q0, p0);
    tmp6 = __lsx_vilvl_h(tmp1, tmp0);
    tmp7 = __lsx_vilvh_h(tmp1, tmp0);
    tmp2 = __lsx_vilvl_b(q2, q1);
    tmp5 = __lsx_vilvh_b(q2, q1);

    temp_src = dst - 3;
    VP8_ST6x1_UB(tmp3, 0, tmp2, 0, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp3, 1, tmp2, 1, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp3, 2, tmp2, 2, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp3, 3, tmp2, 3, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp4, 0, tmp2, 4, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp4, 1, tmp2, 5, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp4, 2, tmp2, 6, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp4, 3, tmp2, 7, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp6, 0, tmp5, 0, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp6, 1, tmp5, 1, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp6, 2, tmp5, 2, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp6, 3, tmp5, 3, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp7, 0, tmp5, 4, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp7, 1, tmp5, 5, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp7, 2, tmp5, 6, temp_src, 4);
    temp_src += stride;
    VP8_ST6x1_UB(tmp7, 3, tmp5, 7, temp_src, 4);
}
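
/* Vertical-edge macroblock filter for chroma: 8 rows from the u plane and
 * 8 rows from the v plane are transposed into one set of p3..q3 vectors,
 * filtered together, and written back to the two planes separately. */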
void ff_vp8_h_loop_filter8uv_lsx(uint8_t *dst_u, uint8_t *dst_v,
                                 ptrdiff_t stride, int b_limit_in,
                                 int limit_in, int thresh_in)
{
    uint8_t *temp_src;
    __m128i p3, p2, p1, p0, q3, q2, q1, q0;
    __m128i mask, hev, flat, thresh, limit, b_limit;
    __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8;
    __m128i row9, row10, row11, row12, row13, row14, row15;
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    ptrdiff_t stride2 = stride << 1;
    ptrdiff_t stride3 = stride2 + stride;
    ptrdiff_t stride4 = stride2 << 1;

    b_limit = __lsx_vreplgr2vr_b(b_limit_in);
    limit   = __lsx_vreplgr2vr_b(limit_in);
    thresh  = __lsx_vreplgr2vr_b(thresh_in);

    temp_src = dst_u - 4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row0, row1, row2, row3);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row4, row5, row6, row7);
    temp_src = dst_v - 4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row8, row9, row10, row11);
    temp_src += stride4;
    DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
              temp_src + stride3, 0, row12, row13, row14, row15);

    LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7,
                        row8, row9, row10, row11, row12, row13, row14, row15,
                        p3, p2, p1, p0, q0, q1, q2, q3);
    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                 hev, mask, flat);
    VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);

    tmp0 = __lsx_vilvl_b(p1, p2);
    tmp1 = __lsx_vilvl_b(q0, p0);
    tmp3 = __lsx_vilvl_h(tmp1, tmp0);
    tmp4 = __lsx_vilvh_h(tmp1, tmp0);
    tmp0 = __lsx_vilvh_b(p1, p2);
    tmp1 = __lsx_vilvh_b(q0, p0);
    tmp6 = __lsx_vilvl_h(tmp1, tmp0);
    tmp7 = __lsx_vilvh_h(tmp1, tmp0);
    tmp2 = __lsx_vilvl_b(q2, q1);
    tmp5 = __lsx_vilvh_b(q2, q1);

    dst_u -= 3;
    VP8_ST6x1_UB(tmp3, 0, tmp2, 0, dst_u, 4);
    dst_u += stride;
    VP8_ST6x1_UB(tmp3, 1, tmp2, 1, dst_u, 4);
    dst_u += stride;
    VP8_ST6x1_UB(tmp3, 2, tmp2, 2, dst_u, 4);
    dst_u += stride;
    VP8_ST6x1_UB(tmp3, 3, tmp2, 3, dst_u, 4);
    dst_u += stride;
    VP8_ST6x1_UB(tmp4, 0, tmp2, 4, dst_u, 4);
    dst_u += stride;
    VP8_ST6x1_UB(tmp4, 1, tmp2, 5, dst_u, 4);
    dst_u += stride;
    VP8_ST6x1_UB(tmp4, 2, tmp2, 6, dst_u, 4);
    dst_u += stride;
    VP8_ST6x1_UB(tmp4, 3, tmp2, 7, dst_u, 4);

    dst_v -= 3;
    VP8_ST6x1_UB(tmp6, 0, tmp5, 0, dst_v, 4);
    dst_v += stride;
    VP8_ST6x1_UB(tmp6, 1, tmp5, 1, dst_v, 4);
    dst_v += stride;
    VP8_ST6x1_UB(tmp6, 2, tmp5, 2, dst_v, 4);
    dst_v += stride;
    VP8_ST6x1_UB(tmp6, 3, tmp5, 3, dst_v, 4);
    dst_v += stride;
    VP8_ST6x1_UB(tmp7, 0, tmp5, 4, dst_v, 4);
    dst_v += stride;
    VP8_ST6x1_UB(tmp7, 1, tmp5, 5, dst_v, 4);
    dst_v += stride;
    VP8_ST6x1_UB(tmp7, 2, tmp5, 6, dst_v, 4);
    dst_v += stride;
    VP8_ST6x1_UB(tmp7, 3, tmp5, 7, dst_v, 4);
}
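
/* Inner (non-macroblock) loop filter across a horizontal edge: the simpler
 * 4-tap filter is used, which only modifies the two rows on each side of
 * the edge (p1..q1). */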
void ff_vp8_v_loop_filter16_inner_lsx(uint8_t *src, ptrdiff_t stride,
                                      int32_t e, int32_t i, int32_t h)
{
    __m128i mask, hev, flat;
    __m128i thresh, b_limit, limit;
    __m128i p3, p2, p1, p0, q3, q2, q1, q0;
    ptrdiff_t stride2 = stride << 1;
    ptrdiff_t stride3 = stride2 + stride;
    ptrdiff_t stride4 = stride2 << 1;

    /* load vector elements */
    src -= stride4;
    DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
              src + stride3, 0, p3, p2, p1, p0);
    src += stride4;
    DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
              src + stride3, 0, q0, q1, q2, q3);

    thresh  = __lsx_vreplgr2vr_b(h);
    b_limit = __lsx_vreplgr2vr_b(e);
    limit   = __lsx_vreplgr2vr_b(i);

    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                 hev, mask, flat);
    VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);

    __lsx_vst(p1, src - stride2, 0);
    __lsx_vst(p0, src - stride,  0);
    __lsx_vst(q0, src, 0);
    __lsx_vst(q1, src + stride,  0);
}
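
/* Inner loop filter across a vertical edge: rows are transposed, the 4-tap
 * filter is applied, and the four modified columns (p1..q1) are stored back
 * as one 4-byte word per row. */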
void ff_vp8_h_loop_filter16_inner_lsx(uint8_t *src, ptrdiff_t stride,
                                      int32_t e, int32_t i, int32_t h)
{
    __m128i mask, hev, flat;
    __m128i thresh, b_limit, limit;
    __m128i p3, p2, p1, p0, q3, q2, q1, q0;
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
    ptrdiff_t stride2 = stride << 1;
    ptrdiff_t stride3 = stride2 + stride;
    ptrdiff_t stride4 = stride2 << 1;

    src -= 4;
    DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
              src + stride3, 0, tmp0, tmp1, tmp2, tmp3);
    src += stride4;
    DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
              src + stride3, 0, tmp4, tmp5, tmp6, tmp7);
    src += stride4;
    DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
              src + stride3, 0, tmp8, tmp9, tmp10, tmp11);
    src += stride4;
    DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
              src + stride3, 0, tmp12, tmp13, tmp14, tmp15);
    src -= 3 * stride4;

    LSX_TRANSPOSE16x8_B(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
                        tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15,
                        p3, p2, p1, p0, q0, q1, q2, q3);

    thresh  = __lsx_vreplgr2vr_b(h);
    b_limit = __lsx_vreplgr2vr_b(e);
    limit   = __lsx_vreplgr2vr_b(i);

    LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
                 hev, mask, flat);
    VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);

    DUP2_ARG2(__lsx_vilvl_b, p0, p1, q1, q0, tmp0, tmp1);
    tmp2 = __lsx_vilvl_h(tmp1, tmp0);
    tmp3 = __lsx_vilvh_h(tmp1, tmp0);

    src += 2;
    ST_W4(tmp2, 0, 1, 2, 3, src, stride);
    ST_W4(tmp3, 0, 1, 2, 3, src, stride);

    DUP2_ARG2(__lsx_vilvh_b, p0, p1, q1, q0, tmp0, tmp1);
    tmp2 = __lsx_vilvl_h(tmp1, tmp0);
    tmp3 = __lsx_vilvh_h(tmp1, tmp0);

    ST_W4(tmp2, 0, 1, 2, 3, src, stride);
    ST_W4(tmp3, 0, 1, 2, 3, src, stride);
    src -= 4 * stride4;
}