diff --git a/0001-Add-PPC64-support-for-boringssl.patch b/0001-Add-PPC64-support-for-boringssl.patch new file mode 100644 index 0000000..af6923a --- /dev/null +++ b/0001-Add-PPC64-support-for-boringssl.patch @@ -0,0 +1,8895 @@ +Index: chromium-122.0.6261.57/third_party/boringssl/src/cmake/perlasm.cmake +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/cmake/perlasm.cmake ++++ chromium-122.0.6261.57/third_party/boringssl/src/cmake/perlasm.cmake +@@ -17,6 +17,7 @@ function(add_perlasm_target dest src) + DEPENDS + ${src} + ${PROJECT_SOURCE_DIR}/crypto/perlasm/arm-xlate.pl ++ ${PROJECT_SOURCE_DIR}/crypto/perlasm/ppc-xlate.pl + ${PROJECT_SOURCE_DIR}/crypto/perlasm/x86_64-xlate.pl + ${PROJECT_SOURCE_DIR}/crypto/perlasm/x86asm.pl + ${PROJECT_SOURCE_DIR}/crypto/perlasm/x86gas.pl +@@ -40,6 +41,9 @@ function(perlasm var arch dest src) + add_perlasm_target("${dest}-apple.S" ${src} ios32 ${ARGN}) + add_perlasm_target("${dest}-linux.S" ${src} linux32 ${ARGN}) + append_to_parent_scope("${var}_ASM" "${dest}-apple.S" "${dest}-linux.S") ++ elseif(arch STREQUAL "ppc64le") ++ add_perlasm_target("${dest}-linux.S" ${src} linux64le) ++ append_to_parent_scope("${var}_ASM" "${dest}-linux.S") + elseif(arch STREQUAL "x86") + add_perlasm_target("${dest}-apple.S" ${src} macosx -fPIC -DOPENSSL_IA32_SSE2 ${ARGN}) + add_perlasm_target("${dest}-linux.S" ${src} elf -fPIC -DOPENSSL_IA32_SSE2 ${ARGN}) +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/CMakeLists.txt +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/CMakeLists.txt ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/CMakeLists.txt +@@ -26,6 +26,7 @@ perlasm(CRYPTO_SOURCES aarch64 cipher_ex + perlasm(CRYPTO_SOURCES aarch64 test/trampoline-armv8 test/asm/trampoline-armv8.pl) + perlasm(CRYPTO_SOURCES arm chacha/chacha-armv4 chacha/asm/chacha-armv4.pl) + 
perlasm(CRYPTO_SOURCES arm test/trampoline-armv4 test/asm/trampoline-armv4.pl) ++perlasm(CRYPTO_SOURCES ppc64le test/trampoline-ppc test/asm/trampoline-ppc.pl) + perlasm(CRYPTO_SOURCES x86 chacha/chacha-x86 chacha/asm/chacha-x86.pl) + perlasm(CRYPTO_SOURCES x86 test/trampoline-x86 test/asm/trampoline-x86.pl) + perlasm(CRYPTO_SOURCES x86_64 chacha/chacha-x86_64 chacha/asm/chacha-x86_64.pl) +@@ -137,6 +138,7 @@ add_library( + cpu_arm_freebsd.c + cpu_arm_linux.c + cpu_intel.c ++ cpu_ppc64le.c + crypto.c + curve25519/curve25519.c + curve25519/curve25519_64_adx.c +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/abi_self_test.cc +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/abi_self_test.cc ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/abi_self_test.cc +@@ -521,3 +521,289 @@ TEST(ABITest, AArch64) { + CHECK_ABI_NO_UNWIND(abi_test_clobber_v15_upper); + } + #endif // OPENSSL_AARCH64 && SUPPORTS_ABI_TEST ++ ++#if defined(OPENSSL_PPC64LE) && defined(SUPPORTS_ABI_TEST) ++extern "C" { ++void abi_test_clobber_r0(void); ++// r1 is the stack pointer. ++void abi_test_clobber_r2(void); ++void abi_test_clobber_r3(void); ++void abi_test_clobber_r4(void); ++void abi_test_clobber_r5(void); ++void abi_test_clobber_r6(void); ++void abi_test_clobber_r7(void); ++void abi_test_clobber_r8(void); ++void abi_test_clobber_r9(void); ++void abi_test_clobber_r10(void); ++void abi_test_clobber_r11(void); ++void abi_test_clobber_r12(void); ++// r13 is the thread pointer. 
++void abi_test_clobber_r14(void); ++void abi_test_clobber_r15(void); ++void abi_test_clobber_r16(void); ++void abi_test_clobber_r17(void); ++void abi_test_clobber_r18(void); ++void abi_test_clobber_r19(void); ++void abi_test_clobber_r20(void); ++void abi_test_clobber_r21(void); ++void abi_test_clobber_r22(void); ++void abi_test_clobber_r23(void); ++void abi_test_clobber_r24(void); ++void abi_test_clobber_r25(void); ++void abi_test_clobber_r26(void); ++void abi_test_clobber_r27(void); ++void abi_test_clobber_r28(void); ++void abi_test_clobber_r29(void); ++void abi_test_clobber_r30(void); ++void abi_test_clobber_r31(void); ++ ++void abi_test_clobber_f0(void); ++void abi_test_clobber_f1(void); ++void abi_test_clobber_f2(void); ++void abi_test_clobber_f3(void); ++void abi_test_clobber_f4(void); ++void abi_test_clobber_f5(void); ++void abi_test_clobber_f6(void); ++void abi_test_clobber_f7(void); ++void abi_test_clobber_f8(void); ++void abi_test_clobber_f9(void); ++void abi_test_clobber_f10(void); ++void abi_test_clobber_f11(void); ++void abi_test_clobber_f12(void); ++void abi_test_clobber_f13(void); ++void abi_test_clobber_f14(void); ++void abi_test_clobber_f15(void); ++void abi_test_clobber_f16(void); ++void abi_test_clobber_f17(void); ++void abi_test_clobber_f18(void); ++void abi_test_clobber_f19(void); ++void abi_test_clobber_f20(void); ++void abi_test_clobber_f21(void); ++void abi_test_clobber_f22(void); ++void abi_test_clobber_f23(void); ++void abi_test_clobber_f24(void); ++void abi_test_clobber_f25(void); ++void abi_test_clobber_f26(void); ++void abi_test_clobber_f27(void); ++void abi_test_clobber_f28(void); ++void abi_test_clobber_f29(void); ++void abi_test_clobber_f30(void); ++void abi_test_clobber_f31(void); ++ ++void abi_test_clobber_v0(void); ++void abi_test_clobber_v1(void); ++void abi_test_clobber_v2(void); ++void abi_test_clobber_v3(void); ++void abi_test_clobber_v4(void); ++void abi_test_clobber_v5(void); ++void abi_test_clobber_v6(void); ++void 
abi_test_clobber_v7(void); ++void abi_test_clobber_v8(void); ++void abi_test_clobber_v9(void); ++void abi_test_clobber_v10(void); ++void abi_test_clobber_v11(void); ++void abi_test_clobber_v12(void); ++void abi_test_clobber_v13(void); ++void abi_test_clobber_v14(void); ++void abi_test_clobber_v15(void); ++void abi_test_clobber_v16(void); ++void abi_test_clobber_v17(void); ++void abi_test_clobber_v18(void); ++void abi_test_clobber_v19(void); ++void abi_test_clobber_v20(void); ++void abi_test_clobber_v21(void); ++void abi_test_clobber_v22(void); ++void abi_test_clobber_v23(void); ++void abi_test_clobber_v24(void); ++void abi_test_clobber_v25(void); ++void abi_test_clobber_v26(void); ++void abi_test_clobber_v27(void); ++void abi_test_clobber_v28(void); ++void abi_test_clobber_v29(void); ++void abi_test_clobber_v30(void); ++void abi_test_clobber_v31(void); ++ ++void abi_test_clobber_cr0(void); ++void abi_test_clobber_cr1(void); ++void abi_test_clobber_cr2(void); ++void abi_test_clobber_cr3(void); ++void abi_test_clobber_cr4(void); ++void abi_test_clobber_cr5(void); ++void abi_test_clobber_cr6(void); ++void abi_test_clobber_cr7(void); ++ ++void abi_test_clobber_ctr(void); ++void abi_test_clobber_lr(void); ++ ++} // extern "C" ++ ++TEST(ABITest, PPC64LE) { ++ // abi_test_trampoline hides unsaved registers from the caller, so we can ++ // safely call the abi_test_clobber_* functions below. 
++  abi_test::internal::CallerState state;
++  RAND_bytes(reinterpret_cast<uint8_t *>(&state), sizeof(state));
++  CHECK_ABI_NO_UNWIND(abi_test_trampoline,
++                      reinterpret_cast<crypto_word_t>(abi_test_clobber_r14),
++                      &state, nullptr, 0, 0 /* no breakpoint */);
++
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_r0);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_r2);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_r3);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_r4);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_r5);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_r6);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_r7);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_r8);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_r9);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_r10);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_r11);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_r12);
++  EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r14),
++                          "r14 was not restored after return");
++  EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r15),
++                          "r15 was not restored after return");
++  EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r16),
++                          "r16 was not restored after return");
++  EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r17),
++                          "r17 was not restored after return");
++  EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r18),
++                          "r18 was not restored after return");
++  EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r19),
++                          "r19 was not restored after return");
++  EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r20),
++                          "r20 was not restored after return");
++  EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r21),
++                          "r21 was not restored after return");
++  EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r22),
++                          "r22 was not restored after return");
++  EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r23),
++                          "r23 was not restored after return");
++  EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r24),
++                          "r24 was not restored after return");
++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r25), ++ "r25 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r26), ++ "r26 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r27), ++ "r27 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r28), ++ "r28 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r29), ++ "r29 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r30), ++ "r30 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_r31), ++ "r31 was not restored after return"); ++ ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f0); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f1); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f2); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f3); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f4); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f5); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f6); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f7); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f8); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f9); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f10); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f11); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f12); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_f13); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f14), ++ "f14 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f15), ++ "f15 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f16), ++ "f16 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f17), ++ "f17 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f18), ++ "f18 was not restored after return"); ++ 
EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f19), ++ "f19 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f20), ++ "f20 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f21), ++ "f21 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f22), ++ "f22 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f23), ++ "f23 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f24), ++ "f24 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f25), ++ "f25 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f26), ++ "f26 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f27), ++ "f27 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f28), ++ "f28 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f29), ++ "f29 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f30), ++ "f30 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_f31), ++ "f31 was not restored after return"); ++ ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v0); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v1); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v2); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v3); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v4); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v5); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v6); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v7); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v8); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v9); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v10); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v11); ++ 
CHECK_ABI_NO_UNWIND(abi_test_clobber_v12); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v13); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v14); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v15); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v16); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v17); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v18); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_v19); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_v20), ++ "v20 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_v21), ++ "v21 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_v22), ++ "v22 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_v23), ++ "v23 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_v24), ++ "v24 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_v25), ++ "v25 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_v26), ++ "v26 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_v27), ++ "v27 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_v28), ++ "v28 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_v29), ++ "v29 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_v30), ++ "v30 was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_v31), ++ "v31 was not restored after return"); ++ ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_cr0); ++ CHECK_ABI_NO_UNWIND(abi_test_clobber_cr1); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_cr2), ++ "cr was not restored after return"); ++ EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_cr3), ++ "cr was not restored 
after return");
++  EXPECT_NONFATAL_FAILURE(CHECK_ABI_NO_UNWIND(abi_test_clobber_cr4),
++                          "cr was not restored after return");
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_cr5);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_cr6);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_cr7);
++
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_ctr);
++  CHECK_ABI_NO_UNWIND(abi_test_clobber_lr);
++}
++#endif  // OPENSSL_PPC64LE && SUPPORTS_ABI_TEST
+Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/cpu_ppc64le.c
+===================================================================
+--- /dev/null
++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/cpu_ppc64le.c
+@@ -0,0 +1,38 @@
++/* Copyright (c) 2016, Google Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
++
++#include <openssl/crypto.h>
++
++#if defined(OPENSSL_PPC64LE)
++
++#include <sys/auxv.h>
++
++#include "internal.h"
++
++
++#if !defined(PPC_FEATURE2_HAS_VCRYPTO)
++// PPC_FEATURE2_HAS_VCRYPTO was taken from section 4.1.2.3 of the “OpenPOWER
++// ABI for Linux Supplement”.
++#define PPC_FEATURE2_HAS_VCRYPTO 0x02000000 ++#endif ++ ++void OPENSSL_cpuid_setup(void) { ++ OPENSSL_ppc64le_hwcap2 = getauxval(AT_HWCAP2); ++} ++ ++int CRYPTO_is_PPC64LE_vcrypto_capable(void) { ++ return (OPENSSL_ppc64le_hwcap2 & PPC_FEATURE2_HAS_VCRYPTO) != 0; ++} ++ ++#endif // OPENSSL_PPC64LE +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/crypto.c +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/crypto.c ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/crypto.c +@@ -25,10 +25,12 @@ static_assert(sizeof(ossl_ssize_t) == si + "ossl_ssize_t should be the same size as size_t"); + + #if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_STATIC_ARMCAP) && \ +- (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ +- defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)) +-// x86, x86_64, and the ARMs need to record the result of a cpuid/getauxval call +-// for the asm to work correctly, unless compiled without asm code. ++ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ ++ defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) || \ ++ defined(OPENSSL_PPC64LE)) ++// x86, x86_64, the ARMs and ppc64le need to record the result of a ++// cpuid/getauxval call for the asm to work correctly, unless compiled without ++// asm code. 
+ #define NEED_CPUID + + #else +@@ -39,7 +41,8 @@ static_assert(sizeof(ossl_ssize_t) == si + #define BORINGSSL_NO_STATIC_INITIALIZER + #endif + +-#endif // !NO_ASM && !STATIC_ARMCAP && (X86 || X86_64 || ARM || AARCH64) ++#endif // !NO_ASM && !STATIC_ARMCAP && ++ // (X86 || X86_64 || ARM || AARCH64 || PPC64LE) + + + // Our assembly does not use the GOT to reference symbols, which means +@@ -83,6 +86,10 @@ uint32_t OPENSSL_get_ia32cap(int idx) { + return OPENSSL_ia32cap_P[idx]; + } + ++#elif defined(OPENSSL_PPC64LE) ++ ++HIDDEN unsigned long OPENSSL_ppc64le_hwcap2 = 0; ++ + #elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) + + #include +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/CMakeLists.txt +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/fipsmodule/CMakeLists.txt ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/CMakeLists.txt +@@ -19,6 +19,8 @@ perlasm(BCM_SOURCES arm sha1-armv4-large + perlasm(BCM_SOURCES arm sha256-armv4 sha/asm/sha256-armv4.pl) + perlasm(BCM_SOURCES arm sha512-armv4 sha/asm/sha512-armv4.pl) + perlasm(BCM_SOURCES arm vpaes-armv7 aes/asm/vpaes-armv7.pl) ++perlasm(BCM_SOURCES ppc64le aesp8-ppc aes/asm/aesp8-ppc.pl) ++perlasm(BCM_SOURCES ppc64le ghashp8-ppc modes/asm/ghashp8-ppc.pl) + perlasm(BCM_SOURCES x86 aesni-x86 aes/asm/aesni-x86.pl) + perlasm(BCM_SOURCES x86 bn-586 bn/asm/bn-586.pl) + perlasm(BCM_SOURCES x86 co-586 bn/asm/co-586.pl) +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/aes/asm/aesp8-ppc.pl +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/aes/asm/aesp8-ppc.pl +@@ -0,0 +1,3809 @@ ++#! /usr/bin/env perl ++# Copyright 2014-2018 The OpenSSL Project Authors. All Rights Reserved. ++# ++# Licensed under the OpenSSL license (the "License"). 
You may not use ++# this file except in compliance with the License. You can obtain a copy ++# in the file LICENSE in the source distribution or at ++# https://www.openssl.org/source/license.html ++ ++# ++# ==================================================================== ++# Written by Andy Polyakov for the OpenSSL ++# project. The module is, however, dual licensed under OpenSSL and ++# CRYPTOGAMS licenses depending on where you obtain it. For further ++# details see http://www.openssl.org/~appro/cryptogams/. ++# ==================================================================== ++# ++# This module implements support for AES instructions as per PowerISA ++# specification version 2.07, first implemented by POWER8 processor. ++# The module is endian-agnostic in sense that it supports both big- ++# and little-endian cases. Data alignment in parallelizable modes is ++# handled with VSX loads and stores, which implies MSR.VSX flag being ++# set. It should also be noted that ISA specification doesn't prohibit ++# alignment exceptions for these instructions on page boundaries. ++# Initially alignment was handled in pure AltiVec/VMX way [when data ++# is aligned programmatically, which in turn guarantees exception- ++# free execution], but it turned to hamper performance when vcipher ++# instructions are interleaved. It's reckoned that eventual ++# misalignment penalties at page boundaries are in average lower ++# than additional overhead in pure AltiVec approach. ++# ++# May 2016 ++# ++# Add XTS subroutine, 9x on little- and 12x improvement on big-endian ++# systems were measured. ++# ++###################################################################### ++# Current large-block performance in cycles per byte processed with ++# 128-bit key (less is better). 
++# ++# CBC en-/decrypt CTR XTS ++# POWER8[le] 3.96/0.72 0.74 1.1 ++# POWER8[be] 3.75/0.65 0.66 1.0 ++# POWER9[le] 4.02/0.86 0.84 1.05 ++# POWER9[be] 3.99/0.78 0.79 0.97 ++ ++$flavour = shift; ++$output = shift; ++ ++if ($flavour =~ /64/) { ++ $SIZE_T =8; ++ $LRSAVE =2*$SIZE_T; ++ $STU ="stdu"; ++ $POP ="ld"; ++ $PUSH ="std"; ++ $UCMP ="cmpld"; ++ $SHL ="sldi"; ++} elsif ($flavour =~ /32/) { ++ $SIZE_T =4; ++ $LRSAVE =$SIZE_T; ++ $STU ="stwu"; ++ $POP ="lwz"; ++ $PUSH ="stw"; ++ $UCMP ="cmplw"; ++ $SHL ="slwi"; ++} else { die "nonsense $flavour"; } ++ ++$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0; ++ ++$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; ++( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or ++( $xlate="${dir}../../../perlasm/ppc-xlate.pl" and -f $xlate) or ++die "can't locate ppc-xlate.pl"; ++ ++open OUT,"| $^X \"$xlate\" $flavour \"$output\"" || die "can't call $xlate: $!"; ++*STDOUT=*OUT; ++ ++$FRAME=8*$SIZE_T; ++$prefix="aes_hw"; ++ ++$sp="r1"; ++$vrsave="r12"; ++ ++######################################################################### ++{{{ # Key setup procedures # ++my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8)); ++my ($zero,$in0,$in1,$key,$rcon,$mask,$tmp)=map("v$_",(0..6)); ++my ($stage,$outperm,$outmask,$outhead,$outtail)=map("v$_",(7..11)); ++ ++$code.=<<___; ++.machine "any" ++ ++.text ++ ++.align 7 ++Lrcon: ++.long 0x01000000, 0x01000000, 0x01000000, 0x01000000 ?rev ++.long 0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000 ?rev ++.long 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c ?rev ++.long 0,0,0,0 ?asis ++Lconsts: ++ mflr r0 ++ bcl 20,31,\$+4 ++ mflr $ptr #vvvvv "distance between . 
and rcon ++ addi $ptr,$ptr,-0x48 ++ mtlr r0 ++ blr ++ .long 0 ++ .byte 0,12,0x14,0,0,0,0,0 ++.asciz "AES for PowerISA 2.07, CRYPTOGAMS by " ++ ++.globl .${prefix}_set_encrypt_key ++.align 5 ++.${prefix}_set_encrypt_key: ++Lset_encrypt_key: ++ mflr r11 ++ $PUSH r11,$LRSAVE($sp) ++ ++ li $ptr,-1 ++ ${UCMP}i $inp,0 ++ beq- Lenc_key_abort # if ($inp==0) return -1; ++ ${UCMP}i $out,0 ++ beq- Lenc_key_abort # if ($out==0) return -1; ++ li $ptr,-2 ++ cmpwi $bits,128 ++ blt- Lenc_key_abort ++ cmpwi $bits,256 ++ bgt- Lenc_key_abort ++ andi. r0,$bits,0x3f ++ bne- Lenc_key_abort ++ ++ lis r0,0xfff0 ++ mfspr $vrsave,256 ++ mtspr 256,r0 ++ ++ bl Lconsts ++ mtlr r11 ++ ++ neg r9,$inp ++ lvx $in0,0,$inp ++ addi $inp,$inp,15 # 15 is not typo ++ lvsr $key,0,r9 # borrow $key ++ li r8,0x20 ++ cmpwi $bits,192 ++ lvx $in1,0,$inp ++ le?vspltisb $mask,0x0f # borrow $mask ++ lvx $rcon,0,$ptr ++ le?vxor $key,$key,$mask # adjust for byte swap ++ lvx $mask,r8,$ptr ++ addi $ptr,$ptr,0x10 ++ vperm $in0,$in0,$in1,$key # align [and byte swap in LE] ++ li $cnt,8 ++ vxor $zero,$zero,$zero ++ mtctr $cnt ++ ++ ?lvsr $outperm,0,$out ++ vspltisb $outmask,-1 ++ lvx $outhead,0,$out ++ ?vperm $outmask,$zero,$outmask,$outperm ++ ++ blt Loop128 ++ addi $inp,$inp,8 ++ beq L192 ++ addi $inp,$inp,8 ++ b L256 ++ ++.align 4 ++Loop128: ++ vperm $key,$in0,$in0,$mask # rotate-n-splat ++ vsldoi $tmp,$zero,$in0,12 # >>32 ++ vperm $outtail,$in0,$in0,$outperm # rotate ++ vsel $stage,$outhead,$outtail,$outmask ++ vmr $outhead,$outtail ++ vcipherlast $key,$key,$rcon ++ stvx $stage,0,$out ++ addi $out,$out,16 ++ ++ vxor $in0,$in0,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in0,$in0,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in0,$in0,$tmp ++ vadduwm $rcon,$rcon,$rcon ++ vxor $in0,$in0,$key ++ bdnz Loop128 ++ ++ lvx $rcon,0,$ptr # last two round keys ++ ++ vperm $key,$in0,$in0,$mask # rotate-n-splat ++ vsldoi $tmp,$zero,$in0,12 # >>32 ++ vperm $outtail,$in0,$in0,$outperm # rotate ++ vsel 
$stage,$outhead,$outtail,$outmask ++ vmr $outhead,$outtail ++ vcipherlast $key,$key,$rcon ++ stvx $stage,0,$out ++ addi $out,$out,16 ++ ++ vxor $in0,$in0,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in0,$in0,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in0,$in0,$tmp ++ vadduwm $rcon,$rcon,$rcon ++ vxor $in0,$in0,$key ++ ++ vperm $key,$in0,$in0,$mask # rotate-n-splat ++ vsldoi $tmp,$zero,$in0,12 # >>32 ++ vperm $outtail,$in0,$in0,$outperm # rotate ++ vsel $stage,$outhead,$outtail,$outmask ++ vmr $outhead,$outtail ++ vcipherlast $key,$key,$rcon ++ stvx $stage,0,$out ++ addi $out,$out,16 ++ ++ vxor $in0,$in0,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in0,$in0,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in0,$in0,$tmp ++ vxor $in0,$in0,$key ++ vperm $outtail,$in0,$in0,$outperm # rotate ++ vsel $stage,$outhead,$outtail,$outmask ++ vmr $outhead,$outtail ++ stvx $stage,0,$out ++ ++ addi $inp,$out,15 # 15 is not typo ++ addi $out,$out,0x50 ++ ++ li $rounds,10 ++ b Ldone ++ ++.align 4 ++L192: ++ lvx $tmp,0,$inp ++ li $cnt,4 ++ vperm $outtail,$in0,$in0,$outperm # rotate ++ vsel $stage,$outhead,$outtail,$outmask ++ vmr $outhead,$outtail ++ stvx $stage,0,$out ++ addi $out,$out,16 ++ vperm $in1,$in1,$tmp,$key # align [and byte swap in LE] ++ vspltisb $key,8 # borrow $key ++ mtctr $cnt ++ vsububm $mask,$mask,$key # adjust the mask ++ ++Loop192: ++ vperm $key,$in1,$in1,$mask # roate-n-splat ++ vsldoi $tmp,$zero,$in0,12 # >>32 ++ vcipherlast $key,$key,$rcon ++ ++ vxor $in0,$in0,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in0,$in0,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in0,$in0,$tmp ++ ++ vsldoi $stage,$zero,$in1,8 ++ vspltw $tmp,$in0,3 ++ vxor $tmp,$tmp,$in1 ++ vsldoi $in1,$zero,$in1,12 # >>32 ++ vadduwm $rcon,$rcon,$rcon ++ vxor $in1,$in1,$tmp ++ vxor $in0,$in0,$key ++ vxor $in1,$in1,$key ++ vsldoi $stage,$stage,$in0,8 ++ ++ vperm $key,$in1,$in1,$mask # rotate-n-splat ++ vsldoi $tmp,$zero,$in0,12 # >>32 ++ vperm 
$outtail,$stage,$stage,$outperm # rotate ++ vsel $stage,$outhead,$outtail,$outmask ++ vmr $outhead,$outtail ++ vcipherlast $key,$key,$rcon ++ stvx $stage,0,$out ++ addi $out,$out,16 ++ ++ vsldoi $stage,$in0,$in1,8 ++ vxor $in0,$in0,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vperm $outtail,$stage,$stage,$outperm # rotate ++ vsel $stage,$outhead,$outtail,$outmask ++ vmr $outhead,$outtail ++ vxor $in0,$in0,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in0,$in0,$tmp ++ stvx $stage,0,$out ++ addi $out,$out,16 ++ ++ vspltw $tmp,$in0,3 ++ vxor $tmp,$tmp,$in1 ++ vsldoi $in1,$zero,$in1,12 # >>32 ++ vadduwm $rcon,$rcon,$rcon ++ vxor $in1,$in1,$tmp ++ vxor $in0,$in0,$key ++ vxor $in1,$in1,$key ++ vperm $outtail,$in0,$in0,$outperm # rotate ++ vsel $stage,$outhead,$outtail,$outmask ++ vmr $outhead,$outtail ++ stvx $stage,0,$out ++ addi $inp,$out,15 # 15 is not typo ++ addi $out,$out,16 ++ bdnz Loop192 ++ ++ li $rounds,12 ++ addi $out,$out,0x20 ++ b Ldone ++ ++.align 4 ++L256: ++ lvx $tmp,0,$inp ++ li $cnt,7 ++ li $rounds,14 ++ vperm $outtail,$in0,$in0,$outperm # rotate ++ vsel $stage,$outhead,$outtail,$outmask ++ vmr $outhead,$outtail ++ stvx $stage,0,$out ++ addi $out,$out,16 ++ vperm $in1,$in1,$tmp,$key # align [and byte swap in LE] ++ mtctr $cnt ++ ++Loop256: ++ vperm $key,$in1,$in1,$mask # rotate-n-splat ++ vsldoi $tmp,$zero,$in0,12 # >>32 ++ vperm $outtail,$in1,$in1,$outperm # rotate ++ vsel $stage,$outhead,$outtail,$outmask ++ vmr $outhead,$outtail ++ vcipherlast $key,$key,$rcon ++ stvx $stage,0,$out ++ addi $out,$out,16 ++ ++ vxor $in0,$in0,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in0,$in0,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in0,$in0,$tmp ++ vadduwm $rcon,$rcon,$rcon ++ vxor $in0,$in0,$key ++ vperm $outtail,$in0,$in0,$outperm # rotate ++ vsel $stage,$outhead,$outtail,$outmask ++ vmr $outhead,$outtail ++ stvx $stage,0,$out ++ addi $inp,$out,15 # 15 is not typo ++ addi $out,$out,16 ++ bdz Ldone ++ ++ vspltw $key,$in0,3 # just splat ++ vsldoi 
$tmp,$zero,$in1,12 # >>32 ++ vsbox $key,$key ++ ++ vxor $in1,$in1,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in1,$in1,$tmp ++ vsldoi $tmp,$zero,$tmp,12 # >>32 ++ vxor $in1,$in1,$tmp ++ ++ vxor $in1,$in1,$key ++ b Loop256 ++ ++.align 4 ++Ldone: ++ lvx $in1,0,$inp # redundant in aligned case ++ vsel $in1,$outhead,$in1,$outmask ++ stvx $in1,0,$inp ++ li $ptr,0 ++ mtspr 256,$vrsave ++ stw $rounds,0($out) ++ ++Lenc_key_abort: ++ mr r3,$ptr ++ blr ++ .long 0 ++ .byte 0,12,0x14,1,0,0,3,0 ++ .long 0 ++.size .${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key ++ ++.globl .${prefix}_set_decrypt_key ++.align 5 ++.${prefix}_set_decrypt_key: ++ $STU $sp,-$FRAME($sp) ++ mflr r10 ++ $PUSH r10,`$FRAME+$LRSAVE`($sp) ++ bl Lset_encrypt_key ++ mtlr r10 ++ ++ cmpwi r3,0 ++ bne- Ldec_key_abort ++ ++ slwi $cnt,$rounds,4 ++ subi $inp,$out,240 # first round key ++ srwi $rounds,$rounds,1 ++ add $out,$inp,$cnt # last round key ++ mtctr $rounds ++ ++Ldeckey: ++ lwz r0, 0($inp) ++ lwz r6, 4($inp) ++ lwz r7, 8($inp) ++ lwz r8, 12($inp) ++ addi $inp,$inp,16 ++ lwz r9, 0($out) ++ lwz r10,4($out) ++ lwz r11,8($out) ++ lwz r12,12($out) ++ stw r0, 0($out) ++ stw r6, 4($out) ++ stw r7, 8($out) ++ stw r8, 12($out) ++ subi $out,$out,16 ++ stw r9, -16($inp) ++ stw r10,-12($inp) ++ stw r11,-8($inp) ++ stw r12,-4($inp) ++ bdnz Ldeckey ++ ++ xor r3,r3,r3 # return value ++Ldec_key_abort: ++ addi $sp,$sp,$FRAME ++ blr ++ .long 0 ++ .byte 0,12,4,1,0x80,0,3,0 ++ .long 0 ++.size .${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key ++___ ++}}} ++######################################################################### ++{{{ # Single block en- and decrypt procedures # ++sub gen_block () { ++my $dir = shift; ++my $n = $dir eq "de" ? 
"n" : ""; ++my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7)); ++ ++$code.=<<___; ++.globl .${prefix}_${dir}crypt ++.align 5 ++.${prefix}_${dir}crypt: ++ lwz $rounds,240($key) ++ lis r0,0xfc00 ++ mfspr $vrsave,256 ++ li $idx,15 # 15 is not typo ++ mtspr 256,r0 ++ ++ lvx v0,0,$inp ++ neg r11,$out ++ lvx v1,$idx,$inp ++ lvsl v2,0,$inp # inpperm ++ le?vspltisb v4,0x0f ++ ?lvsl v3,0,r11 # outperm ++ le?vxor v2,v2,v4 ++ li $idx,16 ++ vperm v0,v0,v1,v2 # align [and byte swap in LE] ++ lvx v1,0,$key ++ ?lvsl v5,0,$key # keyperm ++ srwi $rounds,$rounds,1 ++ lvx v2,$idx,$key ++ addi $idx,$idx,16 ++ subi $rounds,$rounds,1 ++ ?vperm v1,v1,v2,v5 # align round key ++ ++ vxor v0,v0,v1 ++ lvx v1,$idx,$key ++ addi $idx,$idx,16 ++ mtctr $rounds ++ ++Loop_${dir}c: ++ ?vperm v2,v2,v1,v5 ++ v${n}cipher v0,v0,v2 ++ lvx v2,$idx,$key ++ addi $idx,$idx,16 ++ ?vperm v1,v1,v2,v5 ++ v${n}cipher v0,v0,v1 ++ lvx v1,$idx,$key ++ addi $idx,$idx,16 ++ bdnz Loop_${dir}c ++ ++ ?vperm v2,v2,v1,v5 ++ v${n}cipher v0,v0,v2 ++ lvx v2,$idx,$key ++ ?vperm v1,v1,v2,v5 ++ v${n}cipherlast v0,v0,v1 ++ ++ vspltisb v2,-1 ++ vxor v1,v1,v1 ++ li $idx,15 # 15 is not typo ++ ?vperm v2,v1,v2,v3 # outmask ++ le?vxor v3,v3,v4 ++ lvx v1,0,$out # outhead ++ vperm v0,v0,v0,v3 # rotate [and byte swap in LE] ++ vsel v1,v1,v0,v2 ++ lvx v4,$idx,$out ++ stvx v1,0,$out ++ vsel v0,v0,v4,v2 ++ stvx v0,$idx,$out ++ ++ mtspr 256,$vrsave ++ blr ++ .long 0 ++ .byte 0,12,0x14,0,0,0,3,0 ++ .long 0 ++.size .${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt ++___ ++} ++&gen_block("en"); ++&gen_block("de"); ++}}} ++######################################################################### ++{{{ # CBC en- and decrypt procedures # ++my ($inp,$out,$len,$key,$ivp,$enc,$rounds,$idx)=map("r$_",(3..10)); ++my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3)); ++my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm)= ++ map("v$_",(4..10)); ++$code.=<<___; ++.globl .${prefix}_cbc_encrypt ++.align 5 ++.${prefix}_cbc_encrypt: ++ ${UCMP}i 
$len,16 ++ bltlr- ++ ++ cmpwi $enc,0 # test direction ++ lis r0,0xffe0 ++ mfspr $vrsave,256 ++ mtspr 256,r0 ++ ++ li $idx,15 ++ vxor $rndkey0,$rndkey0,$rndkey0 ++ le?vspltisb $tmp,0x0f ++ ++ lvx $ivec,0,$ivp # load [unaligned] iv ++ lvsl $inpperm,0,$ivp ++ lvx $inptail,$idx,$ivp ++ le?vxor $inpperm,$inpperm,$tmp ++ vperm $ivec,$ivec,$inptail,$inpperm ++ ++ neg r11,$inp ++ ?lvsl $keyperm,0,$key # prepare for unaligned key ++ lwz $rounds,240($key) ++ ++ lvsr $inpperm,0,r11 # prepare for unaligned load ++ lvx $inptail,0,$inp ++ addi $inp,$inp,15 # 15 is not typo ++ le?vxor $inpperm,$inpperm,$tmp ++ ++ ?lvsr $outperm,0,$out # prepare for unaligned store ++ vspltisb $outmask,-1 ++ lvx $outhead,0,$out ++ ?vperm $outmask,$rndkey0,$outmask,$outperm ++ le?vxor $outperm,$outperm,$tmp ++ ++ srwi $rounds,$rounds,1 ++ li $idx,16 ++ subi $rounds,$rounds,1 ++ beq Lcbc_dec ++ ++Lcbc_enc: ++ vmr $inout,$inptail ++ lvx $inptail,0,$inp ++ addi $inp,$inp,16 ++ mtctr $rounds ++ subi $len,$len,16 # len-=16 ++ ++ lvx $rndkey0,0,$key ++ vperm $inout,$inout,$inptail,$inpperm ++ lvx $rndkey1,$idx,$key ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vxor $inout,$inout,$rndkey0 ++ lvx $rndkey0,$idx,$key ++ addi $idx,$idx,16 ++ vxor $inout,$inout,$ivec ++ ++Loop_cbc_enc: ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vcipher $inout,$inout,$rndkey1 ++ lvx $rndkey1,$idx,$key ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vcipher $inout,$inout,$rndkey0 ++ lvx $rndkey0,$idx,$key ++ addi $idx,$idx,16 ++ bdnz Loop_cbc_enc ++ ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vcipher $inout,$inout,$rndkey1 ++ lvx $rndkey1,$idx,$key ++ li $idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vcipherlast $ivec,$inout,$rndkey0 ++ ${UCMP}i $len,16 ++ ++ vperm $tmp,$ivec,$ivec,$outperm ++ vsel $inout,$outhead,$tmp,$outmask ++ vmr $outhead,$tmp ++ stvx $inout,0,$out ++ addi $out,$out,16 ++ bge Lcbc_enc ++ ++ b Lcbc_done ++ ++.align 4 ++Lcbc_dec: ++ ${UCMP}i 
$len,128 ++ bge _aesp8_cbc_decrypt8x ++ vmr $tmp,$inptail ++ lvx $inptail,0,$inp ++ addi $inp,$inp,16 ++ mtctr $rounds ++ subi $len,$len,16 # len-=16 ++ ++ lvx $rndkey0,0,$key ++ vperm $tmp,$tmp,$inptail,$inpperm ++ lvx $rndkey1,$idx,$key ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vxor $inout,$tmp,$rndkey0 ++ lvx $rndkey0,$idx,$key ++ addi $idx,$idx,16 ++ ++Loop_cbc_dec: ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vncipher $inout,$inout,$rndkey1 ++ lvx $rndkey1,$idx,$key ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vncipher $inout,$inout,$rndkey0 ++ lvx $rndkey0,$idx,$key ++ addi $idx,$idx,16 ++ bdnz Loop_cbc_dec ++ ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vncipher $inout,$inout,$rndkey1 ++ lvx $rndkey1,$idx,$key ++ li $idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vncipherlast $inout,$inout,$rndkey0 ++ ${UCMP}i $len,16 ++ ++ vxor $inout,$inout,$ivec ++ vmr $ivec,$tmp ++ vperm $tmp,$inout,$inout,$outperm ++ vsel $inout,$outhead,$tmp,$outmask ++ vmr $outhead,$tmp ++ stvx $inout,0,$out ++ addi $out,$out,16 ++ bge Lcbc_dec ++ ++Lcbc_done: ++ addi $out,$out,-1 ++ lvx $inout,0,$out # redundant in aligned case ++ vsel $inout,$outhead,$inout,$outmask ++ stvx $inout,0,$out ++ ++ neg $enc,$ivp # write [unaligned] iv ++ li $idx,15 # 15 is not typo ++ vxor $rndkey0,$rndkey0,$rndkey0 ++ vspltisb $outmask,-1 ++ le?vspltisb $tmp,0x0f ++ ?lvsl $outperm,0,$enc ++ ?vperm $outmask,$rndkey0,$outmask,$outperm ++ le?vxor $outperm,$outperm,$tmp ++ lvx $outhead,0,$ivp ++ vperm $ivec,$ivec,$ivec,$outperm ++ vsel $inout,$outhead,$ivec,$outmask ++ lvx $inptail,$idx,$ivp ++ stvx $inout,0,$ivp ++ vsel $inout,$ivec,$inptail,$outmask ++ stvx $inout,$idx,$ivp ++ ++ mtspr 256,$vrsave ++ blr ++ .long 0 ++ .byte 0,12,0x14,0,0,0,6,0 ++ .long 0 ++___ ++######################################################################### ++{{ # Optimized CBC decrypt procedure # ++my $key_="r11"; ++my 
($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31)); ++ $x00=0 if ($flavour =~ /osx/); ++my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13)); ++my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(14..21)); ++my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys ++ # v26-v31 last 6 round keys ++my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment ++ ++$code.=<<___; ++.align 5 ++_aesp8_cbc_decrypt8x: ++ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp) ++ li r10,`$FRAME+8*16+15` ++ li r11,`$FRAME+8*16+31` ++ stvx v20,r10,$sp # ABI says so ++ addi r10,r10,32 ++ stvx v21,r11,$sp ++ addi r11,r11,32 ++ stvx v22,r10,$sp ++ addi r10,r10,32 ++ stvx v23,r11,$sp ++ addi r11,r11,32 ++ stvx v24,r10,$sp ++ addi r10,r10,32 ++ stvx v25,r11,$sp ++ addi r11,r11,32 ++ stvx v26,r10,$sp ++ addi r10,r10,32 ++ stvx v27,r11,$sp ++ addi r11,r11,32 ++ stvx v28,r10,$sp ++ addi r10,r10,32 ++ stvx v29,r11,$sp ++ addi r11,r11,32 ++ stvx v30,r10,$sp ++ stvx v31,r11,$sp ++ li r0,-1 ++ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave ++ li $x10,0x10 ++ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp) ++ li $x20,0x20 ++ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp) ++ li $x30,0x30 ++ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp) ++ li $x40,0x40 ++ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp) ++ li $x50,0x50 ++ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp) ++ li $x60,0x60 ++ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp) ++ li $x70,0x70 ++ mtspr 256,r0 ++ ++ subi $rounds,$rounds,3 # -4 in total ++ subi $len,$len,128 # bias ++ ++ lvx $rndkey0,$x00,$key # load key schedule ++ lvx v30,$x10,$key ++ addi $key,$key,0x20 ++ lvx v31,$x00,$key ++ ?vperm $rndkey0,$rndkey0,v30,$keyperm ++ addi $key_,$sp,`$FRAME+15` ++ mtctr $rounds ++ ++Load_cbc_dec_key: ++ ?vperm v24,v30,v31,$keyperm ++ lvx v30,$x10,$key ++ addi $key,$key,0x20 ++ stvx v24,$x00,$key_ # off-load round[1] ++ ?vperm v25,v31,v30,$keyperm ++ lvx v31,$x00,$key ++ stvx v25,$x10,$key_ # off-load round[2] ++ 
addi $key_,$key_,0x20 ++ bdnz Load_cbc_dec_key ++ ++ lvx v26,$x10,$key ++ ?vperm v24,v30,v31,$keyperm ++ lvx v27,$x20,$key ++ stvx v24,$x00,$key_ # off-load round[3] ++ ?vperm v25,v31,v26,$keyperm ++ lvx v28,$x30,$key ++ stvx v25,$x10,$key_ # off-load round[4] ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ ?vperm v26,v26,v27,$keyperm ++ lvx v29,$x40,$key ++ ?vperm v27,v27,v28,$keyperm ++ lvx v30,$x50,$key ++ ?vperm v28,v28,v29,$keyperm ++ lvx v31,$x60,$key ++ ?vperm v29,v29,v30,$keyperm ++ lvx $out0,$x70,$key # borrow $out0 ++ ?vperm v30,v30,v31,$keyperm ++ lvx v24,$x00,$key_ # pre-load round[1] ++ ?vperm v31,v31,$out0,$keyperm ++ lvx v25,$x10,$key_ # pre-load round[2] ++ ++ #lvx $inptail,0,$inp # "caller" already did this ++ #addi $inp,$inp,15 # 15 is not typo ++ subi $inp,$inp,15 # undo "caller" ++ ++ le?li $idx,8 ++ lvx_u $in0,$x00,$inp # load first 8 "words" ++ le?lvsl $inpperm,0,$idx ++ le?vspltisb $tmp,0x0f ++ lvx_u $in1,$x10,$inp ++ le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u ++ lvx_u $in2,$x20,$inp ++ le?vperm $in0,$in0,$in0,$inpperm ++ lvx_u $in3,$x30,$inp ++ le?vperm $in1,$in1,$in1,$inpperm ++ lvx_u $in4,$x40,$inp ++ le?vperm $in2,$in2,$in2,$inpperm ++ vxor $out0,$in0,$rndkey0 ++ lvx_u $in5,$x50,$inp ++ le?vperm $in3,$in3,$in3,$inpperm ++ vxor $out1,$in1,$rndkey0 ++ lvx_u $in6,$x60,$inp ++ le?vperm $in4,$in4,$in4,$inpperm ++ vxor $out2,$in2,$rndkey0 ++ lvx_u $in7,$x70,$inp ++ addi $inp,$inp,0x80 ++ le?vperm $in5,$in5,$in5,$inpperm ++ vxor $out3,$in3,$rndkey0 ++ le?vperm $in6,$in6,$in6,$inpperm ++ vxor $out4,$in4,$rndkey0 ++ le?vperm $in7,$in7,$in7,$inpperm ++ vxor $out5,$in5,$rndkey0 ++ vxor $out6,$in6,$rndkey0 ++ vxor $out7,$in7,$rndkey0 ++ ++ mtctr $rounds ++ b Loop_cbc_dec8x ++.align 5 ++Loop_cbc_dec8x: ++ vncipher $out0,$out0,v24 ++ vncipher $out1,$out1,v24 ++ vncipher $out2,$out2,v24 ++ vncipher $out3,$out3,v24 ++ vncipher $out4,$out4,v24 ++ vncipher $out5,$out5,v24 ++ vncipher $out6,$out6,v24 ++ vncipher $out7,$out7,v24 ++ lvx 
v24,$x20,$key_ # round[3] ++ addi $key_,$key_,0x20 ++ ++ vncipher $out0,$out0,v25 ++ vncipher $out1,$out1,v25 ++ vncipher $out2,$out2,v25 ++ vncipher $out3,$out3,v25 ++ vncipher $out4,$out4,v25 ++ vncipher $out5,$out5,v25 ++ vncipher $out6,$out6,v25 ++ vncipher $out7,$out7,v25 ++ lvx v25,$x10,$key_ # round[4] ++ bdnz Loop_cbc_dec8x ++ ++ subic $len,$len,128 # $len-=128 ++ vncipher $out0,$out0,v24 ++ vncipher $out1,$out1,v24 ++ vncipher $out2,$out2,v24 ++ vncipher $out3,$out3,v24 ++ vncipher $out4,$out4,v24 ++ vncipher $out5,$out5,v24 ++ vncipher $out6,$out6,v24 ++ vncipher $out7,$out7,v24 ++ ++ subfe. r0,r0,r0 # borrow?-1:0 ++ vncipher $out0,$out0,v25 ++ vncipher $out1,$out1,v25 ++ vncipher $out2,$out2,v25 ++ vncipher $out3,$out3,v25 ++ vncipher $out4,$out4,v25 ++ vncipher $out5,$out5,v25 ++ vncipher $out6,$out6,v25 ++ vncipher $out7,$out7,v25 ++ ++ and r0,r0,$len ++ vncipher $out0,$out0,v26 ++ vncipher $out1,$out1,v26 ++ vncipher $out2,$out2,v26 ++ vncipher $out3,$out3,v26 ++ vncipher $out4,$out4,v26 ++ vncipher $out5,$out5,v26 ++ vncipher $out6,$out6,v26 ++ vncipher $out7,$out7,v26 ++ ++ add $inp,$inp,r0 # $inp is adjusted in such ++ # way that at exit from the ++ # loop inX-in7 are loaded ++ # with last "words" ++ vncipher $out0,$out0,v27 ++ vncipher $out1,$out1,v27 ++ vncipher $out2,$out2,v27 ++ vncipher $out3,$out3,v27 ++ vncipher $out4,$out4,v27 ++ vncipher $out5,$out5,v27 ++ vncipher $out6,$out6,v27 ++ vncipher $out7,$out7,v27 ++ ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ vncipher $out0,$out0,v28 ++ vncipher $out1,$out1,v28 ++ vncipher $out2,$out2,v28 ++ vncipher $out3,$out3,v28 ++ vncipher $out4,$out4,v28 ++ vncipher $out5,$out5,v28 ++ vncipher $out6,$out6,v28 ++ vncipher $out7,$out7,v28 ++ lvx v24,$x00,$key_ # re-pre-load round[1] ++ ++ vncipher $out0,$out0,v29 ++ vncipher $out1,$out1,v29 ++ vncipher $out2,$out2,v29 ++ vncipher $out3,$out3,v29 ++ vncipher $out4,$out4,v29 ++ vncipher $out5,$out5,v29 ++ vncipher $out6,$out6,v29 ++ vncipher 
$out7,$out7,v29 ++ lvx v25,$x10,$key_ # re-pre-load round[2] ++ ++ vncipher $out0,$out0,v30 ++ vxor $ivec,$ivec,v31 # xor with last round key ++ vncipher $out1,$out1,v30 ++ vxor $in0,$in0,v31 ++ vncipher $out2,$out2,v30 ++ vxor $in1,$in1,v31 ++ vncipher $out3,$out3,v30 ++ vxor $in2,$in2,v31 ++ vncipher $out4,$out4,v30 ++ vxor $in3,$in3,v31 ++ vncipher $out5,$out5,v30 ++ vxor $in4,$in4,v31 ++ vncipher $out6,$out6,v30 ++ vxor $in5,$in5,v31 ++ vncipher $out7,$out7,v30 ++ vxor $in6,$in6,v31 ++ ++ vncipherlast $out0,$out0,$ivec ++ vncipherlast $out1,$out1,$in0 ++ lvx_u $in0,$x00,$inp # load next input block ++ vncipherlast $out2,$out2,$in1 ++ lvx_u $in1,$x10,$inp ++ vncipherlast $out3,$out3,$in2 ++ le?vperm $in0,$in0,$in0,$inpperm ++ lvx_u $in2,$x20,$inp ++ vncipherlast $out4,$out4,$in3 ++ le?vperm $in1,$in1,$in1,$inpperm ++ lvx_u $in3,$x30,$inp ++ vncipherlast $out5,$out5,$in4 ++ le?vperm $in2,$in2,$in2,$inpperm ++ lvx_u $in4,$x40,$inp ++ vncipherlast $out6,$out6,$in5 ++ le?vperm $in3,$in3,$in3,$inpperm ++ lvx_u $in5,$x50,$inp ++ vncipherlast $out7,$out7,$in6 ++ le?vperm $in4,$in4,$in4,$inpperm ++ lvx_u $in6,$x60,$inp ++ vmr $ivec,$in7 ++ le?vperm $in5,$in5,$in5,$inpperm ++ lvx_u $in7,$x70,$inp ++ addi $inp,$inp,0x80 ++ ++ le?vperm $out0,$out0,$out0,$inpperm ++ le?vperm $out1,$out1,$out1,$inpperm ++ stvx_u $out0,$x00,$out ++ le?vperm $in6,$in6,$in6,$inpperm ++ vxor $out0,$in0,$rndkey0 ++ le?vperm $out2,$out2,$out2,$inpperm ++ stvx_u $out1,$x10,$out ++ le?vperm $in7,$in7,$in7,$inpperm ++ vxor $out1,$in1,$rndkey0 ++ le?vperm $out3,$out3,$out3,$inpperm ++ stvx_u $out2,$x20,$out ++ vxor $out2,$in2,$rndkey0 ++ le?vperm $out4,$out4,$out4,$inpperm ++ stvx_u $out3,$x30,$out ++ vxor $out3,$in3,$rndkey0 ++ le?vperm $out5,$out5,$out5,$inpperm ++ stvx_u $out4,$x40,$out ++ vxor $out4,$in4,$rndkey0 ++ le?vperm $out6,$out6,$out6,$inpperm ++ stvx_u $out5,$x50,$out ++ vxor $out5,$in5,$rndkey0 ++ le?vperm $out7,$out7,$out7,$inpperm ++ stvx_u $out6,$x60,$out ++ vxor $out6,$in6,$rndkey0 
++ stvx_u $out7,$x70,$out ++ addi $out,$out,0x80 ++ vxor $out7,$in7,$rndkey0 ++ ++ mtctr $rounds ++ beq Loop_cbc_dec8x # did $len-=128 borrow? ++ ++ addic. $len,$len,128 ++ beq Lcbc_dec8x_done ++ nop ++ nop ++ ++Loop_cbc_dec8x_tail: # up to 7 "words" tail... ++ vncipher $out1,$out1,v24 ++ vncipher $out2,$out2,v24 ++ vncipher $out3,$out3,v24 ++ vncipher $out4,$out4,v24 ++ vncipher $out5,$out5,v24 ++ vncipher $out6,$out6,v24 ++ vncipher $out7,$out7,v24 ++ lvx v24,$x20,$key_ # round[3] ++ addi $key_,$key_,0x20 ++ ++ vncipher $out1,$out1,v25 ++ vncipher $out2,$out2,v25 ++ vncipher $out3,$out3,v25 ++ vncipher $out4,$out4,v25 ++ vncipher $out5,$out5,v25 ++ vncipher $out6,$out6,v25 ++ vncipher $out7,$out7,v25 ++ lvx v25,$x10,$key_ # round[4] ++ bdnz Loop_cbc_dec8x_tail ++ ++ vncipher $out1,$out1,v24 ++ vncipher $out2,$out2,v24 ++ vncipher $out3,$out3,v24 ++ vncipher $out4,$out4,v24 ++ vncipher $out5,$out5,v24 ++ vncipher $out6,$out6,v24 ++ vncipher $out7,$out7,v24 ++ ++ vncipher $out1,$out1,v25 ++ vncipher $out2,$out2,v25 ++ vncipher $out3,$out3,v25 ++ vncipher $out4,$out4,v25 ++ vncipher $out5,$out5,v25 ++ vncipher $out6,$out6,v25 ++ vncipher $out7,$out7,v25 ++ ++ vncipher $out1,$out1,v26 ++ vncipher $out2,$out2,v26 ++ vncipher $out3,$out3,v26 ++ vncipher $out4,$out4,v26 ++ vncipher $out5,$out5,v26 ++ vncipher $out6,$out6,v26 ++ vncipher $out7,$out7,v26 ++ ++ vncipher $out1,$out1,v27 ++ vncipher $out2,$out2,v27 ++ vncipher $out3,$out3,v27 ++ vncipher $out4,$out4,v27 ++ vncipher $out5,$out5,v27 ++ vncipher $out6,$out6,v27 ++ vncipher $out7,$out7,v27 ++ ++ vncipher $out1,$out1,v28 ++ vncipher $out2,$out2,v28 ++ vncipher $out3,$out3,v28 ++ vncipher $out4,$out4,v28 ++ vncipher $out5,$out5,v28 ++ vncipher $out6,$out6,v28 ++ vncipher $out7,$out7,v28 ++ ++ vncipher $out1,$out1,v29 ++ vncipher $out2,$out2,v29 ++ vncipher $out3,$out3,v29 ++ vncipher $out4,$out4,v29 ++ vncipher $out5,$out5,v29 ++ vncipher $out6,$out6,v29 ++ vncipher $out7,$out7,v29 ++ ++ vncipher $out1,$out1,v30 
++ vxor $ivec,$ivec,v31 # last round key ++ vncipher $out2,$out2,v30 ++ vxor $in1,$in1,v31 ++ vncipher $out3,$out3,v30 ++ vxor $in2,$in2,v31 ++ vncipher $out4,$out4,v30 ++ vxor $in3,$in3,v31 ++ vncipher $out5,$out5,v30 ++ vxor $in4,$in4,v31 ++ vncipher $out6,$out6,v30 ++ vxor $in5,$in5,v31 ++ vncipher $out7,$out7,v30 ++ vxor $in6,$in6,v31 ++ ++ cmplwi $len,32 # switch($len) ++ blt Lcbc_dec8x_one ++ nop ++ beq Lcbc_dec8x_two ++ cmplwi $len,64 ++ blt Lcbc_dec8x_three ++ nop ++ beq Lcbc_dec8x_four ++ cmplwi $len,96 ++ blt Lcbc_dec8x_five ++ nop ++ beq Lcbc_dec8x_six ++ ++Lcbc_dec8x_seven: ++ vncipherlast $out1,$out1,$ivec ++ vncipherlast $out2,$out2,$in1 ++ vncipherlast $out3,$out3,$in2 ++ vncipherlast $out4,$out4,$in3 ++ vncipherlast $out5,$out5,$in4 ++ vncipherlast $out6,$out6,$in5 ++ vncipherlast $out7,$out7,$in6 ++ vmr $ivec,$in7 ++ ++ le?vperm $out1,$out1,$out1,$inpperm ++ le?vperm $out2,$out2,$out2,$inpperm ++ stvx_u $out1,$x00,$out ++ le?vperm $out3,$out3,$out3,$inpperm ++ stvx_u $out2,$x10,$out ++ le?vperm $out4,$out4,$out4,$inpperm ++ stvx_u $out3,$x20,$out ++ le?vperm $out5,$out5,$out5,$inpperm ++ stvx_u $out4,$x30,$out ++ le?vperm $out6,$out6,$out6,$inpperm ++ stvx_u $out5,$x40,$out ++ le?vperm $out7,$out7,$out7,$inpperm ++ stvx_u $out6,$x50,$out ++ stvx_u $out7,$x60,$out ++ addi $out,$out,0x70 ++ b Lcbc_dec8x_done ++ ++.align 5 ++Lcbc_dec8x_six: ++ vncipherlast $out2,$out2,$ivec ++ vncipherlast $out3,$out3,$in2 ++ vncipherlast $out4,$out4,$in3 ++ vncipherlast $out5,$out5,$in4 ++ vncipherlast $out6,$out6,$in5 ++ vncipherlast $out7,$out7,$in6 ++ vmr $ivec,$in7 ++ ++ le?vperm $out2,$out2,$out2,$inpperm ++ le?vperm $out3,$out3,$out3,$inpperm ++ stvx_u $out2,$x00,$out ++ le?vperm $out4,$out4,$out4,$inpperm ++ stvx_u $out3,$x10,$out ++ le?vperm $out5,$out5,$out5,$inpperm ++ stvx_u $out4,$x20,$out ++ le?vperm $out6,$out6,$out6,$inpperm ++ stvx_u $out5,$x30,$out ++ le?vperm $out7,$out7,$out7,$inpperm ++ stvx_u $out6,$x40,$out ++ stvx_u $out7,$x50,$out ++ addi 
$out,$out,0x60 ++ b Lcbc_dec8x_done ++ ++.align 5 ++Lcbc_dec8x_five: ++ vncipherlast $out3,$out3,$ivec ++ vncipherlast $out4,$out4,$in3 ++ vncipherlast $out5,$out5,$in4 ++ vncipherlast $out6,$out6,$in5 ++ vncipherlast $out7,$out7,$in6 ++ vmr $ivec,$in7 ++ ++ le?vperm $out3,$out3,$out3,$inpperm ++ le?vperm $out4,$out4,$out4,$inpperm ++ stvx_u $out3,$x00,$out ++ le?vperm $out5,$out5,$out5,$inpperm ++ stvx_u $out4,$x10,$out ++ le?vperm $out6,$out6,$out6,$inpperm ++ stvx_u $out5,$x20,$out ++ le?vperm $out7,$out7,$out7,$inpperm ++ stvx_u $out6,$x30,$out ++ stvx_u $out7,$x40,$out ++ addi $out,$out,0x50 ++ b Lcbc_dec8x_done ++ ++.align 5 ++Lcbc_dec8x_four: ++ vncipherlast $out4,$out4,$ivec ++ vncipherlast $out5,$out5,$in4 ++ vncipherlast $out6,$out6,$in5 ++ vncipherlast $out7,$out7,$in6 ++ vmr $ivec,$in7 ++ ++ le?vperm $out4,$out4,$out4,$inpperm ++ le?vperm $out5,$out5,$out5,$inpperm ++ stvx_u $out4,$x00,$out ++ le?vperm $out6,$out6,$out6,$inpperm ++ stvx_u $out5,$x10,$out ++ le?vperm $out7,$out7,$out7,$inpperm ++ stvx_u $out6,$x20,$out ++ stvx_u $out7,$x30,$out ++ addi $out,$out,0x40 ++ b Lcbc_dec8x_done ++ ++.align 5 ++Lcbc_dec8x_three: ++ vncipherlast $out5,$out5,$ivec ++ vncipherlast $out6,$out6,$in5 ++ vncipherlast $out7,$out7,$in6 ++ vmr $ivec,$in7 ++ ++ le?vperm $out5,$out5,$out5,$inpperm ++ le?vperm $out6,$out6,$out6,$inpperm ++ stvx_u $out5,$x00,$out ++ le?vperm $out7,$out7,$out7,$inpperm ++ stvx_u $out6,$x10,$out ++ stvx_u $out7,$x20,$out ++ addi $out,$out,0x30 ++ b Lcbc_dec8x_done ++ ++.align 5 ++Lcbc_dec8x_two: ++ vncipherlast $out6,$out6,$ivec ++ vncipherlast $out7,$out7,$in6 ++ vmr $ivec,$in7 ++ ++ le?vperm $out6,$out6,$out6,$inpperm ++ le?vperm $out7,$out7,$out7,$inpperm ++ stvx_u $out6,$x00,$out ++ stvx_u $out7,$x10,$out ++ addi $out,$out,0x20 ++ b Lcbc_dec8x_done ++ ++.align 5 ++Lcbc_dec8x_one: ++ vncipherlast $out7,$out7,$ivec ++ vmr $ivec,$in7 ++ ++ le?vperm $out7,$out7,$out7,$inpperm ++ stvx_u $out7,0,$out ++ addi $out,$out,0x10 ++ ++Lcbc_dec8x_done: 
++ le?vperm $ivec,$ivec,$ivec,$inpperm ++ stvx_u $ivec,0,$ivp # write [unaligned] iv ++ ++ li r10,`$FRAME+15` ++ li r11,`$FRAME+31` ++ stvx $inpperm,r10,$sp # wipe copies of round keys ++ addi r10,r10,32 ++ stvx $inpperm,r11,$sp ++ addi r11,r11,32 ++ stvx $inpperm,r10,$sp ++ addi r10,r10,32 ++ stvx $inpperm,r11,$sp ++ addi r11,r11,32 ++ stvx $inpperm,r10,$sp ++ addi r10,r10,32 ++ stvx $inpperm,r11,$sp ++ addi r11,r11,32 ++ stvx $inpperm,r10,$sp ++ addi r10,r10,32 ++ stvx $inpperm,r11,$sp ++ addi r11,r11,32 ++ ++ mtspr 256,$vrsave ++ lvx v20,r10,$sp # ABI says so ++ addi r10,r10,32 ++ lvx v21,r11,$sp ++ addi r11,r11,32 ++ lvx v22,r10,$sp ++ addi r10,r10,32 ++ lvx v23,r11,$sp ++ addi r11,r11,32 ++ lvx v24,r10,$sp ++ addi r10,r10,32 ++ lvx v25,r11,$sp ++ addi r11,r11,32 ++ lvx v26,r10,$sp ++ addi r10,r10,32 ++ lvx v27,r11,$sp ++ addi r11,r11,32 ++ lvx v28,r10,$sp ++ addi r10,r10,32 ++ lvx v29,r11,$sp ++ addi r11,r11,32 ++ lvx v30,r10,$sp ++ lvx v31,r11,$sp ++ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp) ++ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp) ++ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp) ++ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp) ++ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp) ++ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp) ++ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T` ++ blr ++ .long 0 ++ .byte 0,12,0x04,0,0x80,6,6,0 ++ .long 0 ++.size .${prefix}_cbc_encrypt,.-.${prefix}_cbc_encrypt ++___ ++}} }}} ++ ++######################################################################### ++{{{ # CTR procedure[s] # ++my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10)); ++my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3)); ++my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)= ++ map("v$_",(4..11)); ++my $dat=$tmp; ++ ++$code.=<<___; ++.globl .${prefix}_ctr32_encrypt_blocks ++.align 5 ++.${prefix}_ctr32_encrypt_blocks: ++ ${UCMP}i $len,1 ++ bltlr- ++ ++ lis r0,0xfff0 ++ mfspr $vrsave,256 ++ mtspr 256,r0 ++ ++ li $idx,15 ++ vxor $rndkey0,$rndkey0,$rndkey0 ++ le?vspltisb 
$tmp,0x0f ++ ++ lvx $ivec,0,$ivp # load [unaligned] iv ++ lvsl $inpperm,0,$ivp ++ lvx $inptail,$idx,$ivp ++ vspltisb $one,1 ++ le?vxor $inpperm,$inpperm,$tmp ++ vperm $ivec,$ivec,$inptail,$inpperm ++ vsldoi $one,$rndkey0,$one,1 ++ ++ neg r11,$inp ++ ?lvsl $keyperm,0,$key # prepare for unaligned key ++ lwz $rounds,240($key) ++ ++ lvsr $inpperm,0,r11 # prepare for unaligned load ++ lvx $inptail,0,$inp ++ addi $inp,$inp,15 # 15 is not typo ++ le?vxor $inpperm,$inpperm,$tmp ++ ++ srwi $rounds,$rounds,1 ++ li $idx,16 ++ subi $rounds,$rounds,1 ++ ++ ${UCMP}i $len,8 ++ bge _aesp8_ctr32_encrypt8x ++ ++ ?lvsr $outperm,0,$out # prepare for unaligned store ++ vspltisb $outmask,-1 ++ lvx $outhead,0,$out ++ ?vperm $outmask,$rndkey0,$outmask,$outperm ++ le?vxor $outperm,$outperm,$tmp ++ ++ lvx $rndkey0,0,$key ++ mtctr $rounds ++ lvx $rndkey1,$idx,$key ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vxor $inout,$ivec,$rndkey0 ++ lvx $rndkey0,$idx,$key ++ addi $idx,$idx,16 ++ b Loop_ctr32_enc ++ ++.align 5 ++Loop_ctr32_enc: ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vcipher $inout,$inout,$rndkey1 ++ lvx $rndkey1,$idx,$key ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vcipher $inout,$inout,$rndkey0 ++ lvx $rndkey0,$idx,$key ++ addi $idx,$idx,16 ++ bdnz Loop_ctr32_enc ++ ++ vadduwm $ivec,$ivec,$one ++ vmr $dat,$inptail ++ lvx $inptail,0,$inp ++ addi $inp,$inp,16 ++ subic. 
$len,$len,1 # blocks-- ++ ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vcipher $inout,$inout,$rndkey1 ++ lvx $rndkey1,$idx,$key ++ vperm $dat,$dat,$inptail,$inpperm ++ li $idx,16 ++ ?vperm $rndkey1,$rndkey0,$rndkey1,$keyperm ++ lvx $rndkey0,0,$key ++ vxor $dat,$dat,$rndkey1 # last round key ++ vcipherlast $inout,$inout,$dat ++ ++ lvx $rndkey1,$idx,$key ++ addi $idx,$idx,16 ++ vperm $inout,$inout,$inout,$outperm ++ vsel $dat,$outhead,$inout,$outmask ++ mtctr $rounds ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vmr $outhead,$inout ++ vxor $inout,$ivec,$rndkey0 ++ lvx $rndkey0,$idx,$key ++ addi $idx,$idx,16 ++ stvx $dat,0,$out ++ addi $out,$out,16 ++ bne Loop_ctr32_enc ++ ++ addi $out,$out,-1 ++ lvx $inout,0,$out # redundant in aligned case ++ vsel $inout,$outhead,$inout,$outmask ++ stvx $inout,0,$out ++ ++ mtspr 256,$vrsave ++ blr ++ .long 0 ++ .byte 0,12,0x14,0,0,0,6,0 ++ .long 0 ++___ ++######################################################################### ++{{ # Optimized CTR procedure # ++my $key_="r11"; ++my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31)); ++ $x00=0 if ($flavour =~ /osx/); ++my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10,12..14)); ++my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(15..22)); ++my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys ++ # v26-v31 last 6 round keys ++my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment ++my ($two,$three,$four)=($outhead,$outperm,$outmask); ++ ++$code.=<<___; ++.align 5 ++_aesp8_ctr32_encrypt8x: ++ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp) ++ li r10,`$FRAME+8*16+15` ++ li r11,`$FRAME+8*16+31` ++ stvx v20,r10,$sp # ABI says so ++ addi r10,r10,32 ++ stvx v21,r11,$sp ++ addi r11,r11,32 ++ stvx v22,r10,$sp ++ addi r10,r10,32 ++ stvx v23,r11,$sp ++ addi r11,r11,32 ++ stvx v24,r10,$sp ++ addi r10,r10,32 ++ stvx v25,r11,$sp ++ addi r11,r11,32 ++ stvx v26,r10,$sp ++ addi r10,r10,32 ++ stvx v27,r11,$sp ++ 
addi r11,r11,32 ++ stvx v28,r10,$sp ++ addi r10,r10,32 ++ stvx v29,r11,$sp ++ addi r11,r11,32 ++ stvx v30,r10,$sp ++ stvx v31,r11,$sp ++ li r0,-1 ++ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave ++ li $x10,0x10 ++ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp) ++ li $x20,0x20 ++ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp) ++ li $x30,0x30 ++ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp) ++ li $x40,0x40 ++ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp) ++ li $x50,0x50 ++ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp) ++ li $x60,0x60 ++ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp) ++ li $x70,0x70 ++ mtspr 256,r0 ++ ++ subi $rounds,$rounds,3 # -4 in total ++ ++ lvx $rndkey0,$x00,$key # load key schedule ++ lvx v30,$x10,$key ++ addi $key,$key,0x20 ++ lvx v31,$x00,$key ++ ?vperm $rndkey0,$rndkey0,v30,$keyperm ++ addi $key_,$sp,`$FRAME+15` ++ mtctr $rounds ++ ++Load_ctr32_enc_key: ++ ?vperm v24,v30,v31,$keyperm ++ lvx v30,$x10,$key ++ addi $key,$key,0x20 ++ stvx v24,$x00,$key_ # off-load round[1] ++ ?vperm v25,v31,v30,$keyperm ++ lvx v31,$x00,$key ++ stvx v25,$x10,$key_ # off-load round[2] ++ addi $key_,$key_,0x20 ++ bdnz Load_ctr32_enc_key ++ ++ lvx v26,$x10,$key ++ ?vperm v24,v30,v31,$keyperm ++ lvx v27,$x20,$key ++ stvx v24,$x00,$key_ # off-load round[3] ++ ?vperm v25,v31,v26,$keyperm ++ lvx v28,$x30,$key ++ stvx v25,$x10,$key_ # off-load round[4] ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ ?vperm v26,v26,v27,$keyperm ++ lvx v29,$x40,$key ++ ?vperm v27,v27,v28,$keyperm ++ lvx v30,$x50,$key ++ ?vperm v28,v28,v29,$keyperm ++ lvx v31,$x60,$key ++ ?vperm v29,v29,v30,$keyperm ++ lvx $out0,$x70,$key # borrow $out0 ++ ?vperm v30,v30,v31,$keyperm ++ lvx v24,$x00,$key_ # pre-load round[1] ++ ?vperm v31,v31,$out0,$keyperm ++ lvx v25,$x10,$key_ # pre-load round[2] ++ ++ vadduwm $two,$one,$one ++ subi $inp,$inp,15 # undo "caller" ++ $SHL $len,$len,4 ++ ++ vadduwm $out1,$ivec,$one # counter values ... ++ vadduwm $out2,$ivec,$two ++ vxor $out0,$ivec,$rndkey0 # ... 
xored with rndkey[0] ++ le?li $idx,8 ++ vadduwm $out3,$out1,$two ++ vxor $out1,$out1,$rndkey0 ++ le?lvsl $inpperm,0,$idx ++ vadduwm $out4,$out2,$two ++ vxor $out2,$out2,$rndkey0 ++ le?vspltisb $tmp,0x0f ++ vadduwm $out5,$out3,$two ++ vxor $out3,$out3,$rndkey0 ++ le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u ++ vadduwm $out6,$out4,$two ++ vxor $out4,$out4,$rndkey0 ++ vadduwm $out7,$out5,$two ++ vxor $out5,$out5,$rndkey0 ++ vadduwm $ivec,$out6,$two # next counter value ++ vxor $out6,$out6,$rndkey0 ++ vxor $out7,$out7,$rndkey0 ++ ++ mtctr $rounds ++ b Loop_ctr32_enc8x ++.align 5 ++Loop_ctr32_enc8x: ++ vcipher $out0,$out0,v24 ++ vcipher $out1,$out1,v24 ++ vcipher $out2,$out2,v24 ++ vcipher $out3,$out3,v24 ++ vcipher $out4,$out4,v24 ++ vcipher $out5,$out5,v24 ++ vcipher $out6,$out6,v24 ++ vcipher $out7,$out7,v24 ++Loop_ctr32_enc8x_middle: ++ lvx v24,$x20,$key_ # round[3] ++ addi $key_,$key_,0x20 ++ ++ vcipher $out0,$out0,v25 ++ vcipher $out1,$out1,v25 ++ vcipher $out2,$out2,v25 ++ vcipher $out3,$out3,v25 ++ vcipher $out4,$out4,v25 ++ vcipher $out5,$out5,v25 ++ vcipher $out6,$out6,v25 ++ vcipher $out7,$out7,v25 ++ lvx v25,$x10,$key_ # round[4] ++ bdnz Loop_ctr32_enc8x ++ ++ subic r11,$len,256 # $len-256, borrow $key_ ++ vcipher $out0,$out0,v24 ++ vcipher $out1,$out1,v24 ++ vcipher $out2,$out2,v24 ++ vcipher $out3,$out3,v24 ++ vcipher $out4,$out4,v24 ++ vcipher $out5,$out5,v24 ++ vcipher $out6,$out6,v24 ++ vcipher $out7,$out7,v24 ++ ++ subfe r0,r0,r0 # borrow?-1:0 ++ vcipher $out0,$out0,v25 ++ vcipher $out1,$out1,v25 ++ vcipher $out2,$out2,v25 ++ vcipher $out3,$out3,v25 ++ vcipher $out4,$out4,v25 ++ vcipher $out5,$out5,v25 ++ vcipher $out6,$out6,v25 ++ vcipher $out7,$out7,v25 ++ ++ and r0,r0,r11 ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ vcipher $out0,$out0,v26 ++ vcipher $out1,$out1,v26 ++ vcipher $out2,$out2,v26 ++ vcipher $out3,$out3,v26 ++ vcipher $out4,$out4,v26 ++ vcipher $out5,$out5,v26 ++ vcipher $out6,$out6,v26 ++ vcipher $out7,$out7,v26 ++ lvx 
v24,$x00,$key_ # re-pre-load round[1] ++ ++ subic $len,$len,129 # $len-=129 ++ vcipher $out0,$out0,v27 ++ addi $len,$len,1 # $len-=128 really ++ vcipher $out1,$out1,v27 ++ vcipher $out2,$out2,v27 ++ vcipher $out3,$out3,v27 ++ vcipher $out4,$out4,v27 ++ vcipher $out5,$out5,v27 ++ vcipher $out6,$out6,v27 ++ vcipher $out7,$out7,v27 ++ lvx v25,$x10,$key_ # re-pre-load round[2] ++ ++ vcipher $out0,$out0,v28 ++ lvx_u $in0,$x00,$inp # load input ++ vcipher $out1,$out1,v28 ++ lvx_u $in1,$x10,$inp ++ vcipher $out2,$out2,v28 ++ lvx_u $in2,$x20,$inp ++ vcipher $out3,$out3,v28 ++ lvx_u $in3,$x30,$inp ++ vcipher $out4,$out4,v28 ++ lvx_u $in4,$x40,$inp ++ vcipher $out5,$out5,v28 ++ lvx_u $in5,$x50,$inp ++ vcipher $out6,$out6,v28 ++ lvx_u $in6,$x60,$inp ++ vcipher $out7,$out7,v28 ++ lvx_u $in7,$x70,$inp ++ addi $inp,$inp,0x80 ++ ++ vcipher $out0,$out0,v29 ++ le?vperm $in0,$in0,$in0,$inpperm ++ vcipher $out1,$out1,v29 ++ le?vperm $in1,$in1,$in1,$inpperm ++ vcipher $out2,$out2,v29 ++ le?vperm $in2,$in2,$in2,$inpperm ++ vcipher $out3,$out3,v29 ++ le?vperm $in3,$in3,$in3,$inpperm ++ vcipher $out4,$out4,v29 ++ le?vperm $in4,$in4,$in4,$inpperm ++ vcipher $out5,$out5,v29 ++ le?vperm $in5,$in5,$in5,$inpperm ++ vcipher $out6,$out6,v29 ++ le?vperm $in6,$in6,$in6,$inpperm ++ vcipher $out7,$out7,v29 ++ le?vperm $in7,$in7,$in7,$inpperm ++ ++ add $inp,$inp,r0 # $inp is adjusted in such ++ # way that at exit from the ++ # loop inX-in7 are loaded ++ # with last "words" ++ subfe. r0,r0,r0 # borrow?-1:0 ++ vcipher $out0,$out0,v30 ++ vxor $in0,$in0,v31 # xor with last round key ++ vcipher $out1,$out1,v30 ++ vxor $in1,$in1,v31 ++ vcipher $out2,$out2,v30 ++ vxor $in2,$in2,v31 ++ vcipher $out3,$out3,v30 ++ vxor $in3,$in3,v31 ++ vcipher $out4,$out4,v30 ++ vxor $in4,$in4,v31 ++ vcipher $out5,$out5,v30 ++ vxor $in5,$in5,v31 ++ vcipher $out6,$out6,v30 ++ vxor $in6,$in6,v31 ++ vcipher $out7,$out7,v30 ++ vxor $in7,$in7,v31 ++ ++ bne Lctr32_enc8x_break # did $len-129 borrow? 
++ ++ vcipherlast $in0,$out0,$in0 ++ vcipherlast $in1,$out1,$in1 ++ vadduwm $out1,$ivec,$one # counter values ... ++ vcipherlast $in2,$out2,$in2 ++ vadduwm $out2,$ivec,$two ++ vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0] ++ vcipherlast $in3,$out3,$in3 ++ vadduwm $out3,$out1,$two ++ vxor $out1,$out1,$rndkey0 ++ vcipherlast $in4,$out4,$in4 ++ vadduwm $out4,$out2,$two ++ vxor $out2,$out2,$rndkey0 ++ vcipherlast $in5,$out5,$in5 ++ vadduwm $out5,$out3,$two ++ vxor $out3,$out3,$rndkey0 ++ vcipherlast $in6,$out6,$in6 ++ vadduwm $out6,$out4,$two ++ vxor $out4,$out4,$rndkey0 ++ vcipherlast $in7,$out7,$in7 ++ vadduwm $out7,$out5,$two ++ vxor $out5,$out5,$rndkey0 ++ le?vperm $in0,$in0,$in0,$inpperm ++ vadduwm $ivec,$out6,$two # next counter value ++ vxor $out6,$out6,$rndkey0 ++ le?vperm $in1,$in1,$in1,$inpperm ++ vxor $out7,$out7,$rndkey0 ++ mtctr $rounds ++ ++ vcipher $out0,$out0,v24 ++ stvx_u $in0,$x00,$out ++ le?vperm $in2,$in2,$in2,$inpperm ++ vcipher $out1,$out1,v24 ++ stvx_u $in1,$x10,$out ++ le?vperm $in3,$in3,$in3,$inpperm ++ vcipher $out2,$out2,v24 ++ stvx_u $in2,$x20,$out ++ le?vperm $in4,$in4,$in4,$inpperm ++ vcipher $out3,$out3,v24 ++ stvx_u $in3,$x30,$out ++ le?vperm $in5,$in5,$in5,$inpperm ++ vcipher $out4,$out4,v24 ++ stvx_u $in4,$x40,$out ++ le?vperm $in6,$in6,$in6,$inpperm ++ vcipher $out5,$out5,v24 ++ stvx_u $in5,$x50,$out ++ le?vperm $in7,$in7,$in7,$inpperm ++ vcipher $out6,$out6,v24 ++ stvx_u $in6,$x60,$out ++ vcipher $out7,$out7,v24 ++ stvx_u $in7,$x70,$out ++ addi $out,$out,0x80 ++ ++ b Loop_ctr32_enc8x_middle ++ ++.align 5 ++Lctr32_enc8x_break: ++ cmpwi $len,-0x60 ++ blt Lctr32_enc8x_one ++ nop ++ beq Lctr32_enc8x_two ++ cmpwi $len,-0x40 ++ blt Lctr32_enc8x_three ++ nop ++ beq Lctr32_enc8x_four ++ cmpwi $len,-0x20 ++ blt Lctr32_enc8x_five ++ nop ++ beq Lctr32_enc8x_six ++ cmpwi $len,0x00 ++ blt Lctr32_enc8x_seven ++ ++Lctr32_enc8x_eight: ++ vcipherlast $out0,$out0,$in0 ++ vcipherlast $out1,$out1,$in1 ++ vcipherlast $out2,$out2,$in2 ++ 
vcipherlast $out3,$out3,$in3 ++ vcipherlast $out4,$out4,$in4 ++ vcipherlast $out5,$out5,$in5 ++ vcipherlast $out6,$out6,$in6 ++ vcipherlast $out7,$out7,$in7 ++ ++ le?vperm $out0,$out0,$out0,$inpperm ++ le?vperm $out1,$out1,$out1,$inpperm ++ stvx_u $out0,$x00,$out ++ le?vperm $out2,$out2,$out2,$inpperm ++ stvx_u $out1,$x10,$out ++ le?vperm $out3,$out3,$out3,$inpperm ++ stvx_u $out2,$x20,$out ++ le?vperm $out4,$out4,$out4,$inpperm ++ stvx_u $out3,$x30,$out ++ le?vperm $out5,$out5,$out5,$inpperm ++ stvx_u $out4,$x40,$out ++ le?vperm $out6,$out6,$out6,$inpperm ++ stvx_u $out5,$x50,$out ++ le?vperm $out7,$out7,$out7,$inpperm ++ stvx_u $out6,$x60,$out ++ stvx_u $out7,$x70,$out ++ addi $out,$out,0x80 ++ b Lctr32_enc8x_done ++ ++.align 5 ++Lctr32_enc8x_seven: ++ vcipherlast $out0,$out0,$in1 ++ vcipherlast $out1,$out1,$in2 ++ vcipherlast $out2,$out2,$in3 ++ vcipherlast $out3,$out3,$in4 ++ vcipherlast $out4,$out4,$in5 ++ vcipherlast $out5,$out5,$in6 ++ vcipherlast $out6,$out6,$in7 ++ ++ le?vperm $out0,$out0,$out0,$inpperm ++ le?vperm $out1,$out1,$out1,$inpperm ++ stvx_u $out0,$x00,$out ++ le?vperm $out2,$out2,$out2,$inpperm ++ stvx_u $out1,$x10,$out ++ le?vperm $out3,$out3,$out3,$inpperm ++ stvx_u $out2,$x20,$out ++ le?vperm $out4,$out4,$out4,$inpperm ++ stvx_u $out3,$x30,$out ++ le?vperm $out5,$out5,$out5,$inpperm ++ stvx_u $out4,$x40,$out ++ le?vperm $out6,$out6,$out6,$inpperm ++ stvx_u $out5,$x50,$out ++ stvx_u $out6,$x60,$out ++ addi $out,$out,0x70 ++ b Lctr32_enc8x_done ++ ++.align 5 ++Lctr32_enc8x_six: ++ vcipherlast $out0,$out0,$in2 ++ vcipherlast $out1,$out1,$in3 ++ vcipherlast $out2,$out2,$in4 ++ vcipherlast $out3,$out3,$in5 ++ vcipherlast $out4,$out4,$in6 ++ vcipherlast $out5,$out5,$in7 ++ ++ le?vperm $out0,$out0,$out0,$inpperm ++ le?vperm $out1,$out1,$out1,$inpperm ++ stvx_u $out0,$x00,$out ++ le?vperm $out2,$out2,$out2,$inpperm ++ stvx_u $out1,$x10,$out ++ le?vperm $out3,$out3,$out3,$inpperm ++ stvx_u $out2,$x20,$out ++ le?vperm $out4,$out4,$out4,$inpperm ++ 
stvx_u $out3,$x30,$out ++ le?vperm $out5,$out5,$out5,$inpperm ++ stvx_u $out4,$x40,$out ++ stvx_u $out5,$x50,$out ++ addi $out,$out,0x60 ++ b Lctr32_enc8x_done ++ ++.align 5 ++Lctr32_enc8x_five: ++ vcipherlast $out0,$out0,$in3 ++ vcipherlast $out1,$out1,$in4 ++ vcipherlast $out2,$out2,$in5 ++ vcipherlast $out3,$out3,$in6 ++ vcipherlast $out4,$out4,$in7 ++ ++ le?vperm $out0,$out0,$out0,$inpperm ++ le?vperm $out1,$out1,$out1,$inpperm ++ stvx_u $out0,$x00,$out ++ le?vperm $out2,$out2,$out2,$inpperm ++ stvx_u $out1,$x10,$out ++ le?vperm $out3,$out3,$out3,$inpperm ++ stvx_u $out2,$x20,$out ++ le?vperm $out4,$out4,$out4,$inpperm ++ stvx_u $out3,$x30,$out ++ stvx_u $out4,$x40,$out ++ addi $out,$out,0x50 ++ b Lctr32_enc8x_done ++ ++.align 5 ++Lctr32_enc8x_four: ++ vcipherlast $out0,$out0,$in4 ++ vcipherlast $out1,$out1,$in5 ++ vcipherlast $out2,$out2,$in6 ++ vcipherlast $out3,$out3,$in7 ++ ++ le?vperm $out0,$out0,$out0,$inpperm ++ le?vperm $out1,$out1,$out1,$inpperm ++ stvx_u $out0,$x00,$out ++ le?vperm $out2,$out2,$out2,$inpperm ++ stvx_u $out1,$x10,$out ++ le?vperm $out3,$out3,$out3,$inpperm ++ stvx_u $out2,$x20,$out ++ stvx_u $out3,$x30,$out ++ addi $out,$out,0x40 ++ b Lctr32_enc8x_done ++ ++.align 5 ++Lctr32_enc8x_three: ++ vcipherlast $out0,$out0,$in5 ++ vcipherlast $out1,$out1,$in6 ++ vcipherlast $out2,$out2,$in7 ++ ++ le?vperm $out0,$out0,$out0,$inpperm ++ le?vperm $out1,$out1,$out1,$inpperm ++ stvx_u $out0,$x00,$out ++ le?vperm $out2,$out2,$out2,$inpperm ++ stvx_u $out1,$x10,$out ++ stvx_u $out2,$x20,$out ++ addi $out,$out,0x30 ++ b Lctr32_enc8x_done ++ ++.align 5 ++Lctr32_enc8x_two: ++ vcipherlast $out0,$out0,$in6 ++ vcipherlast $out1,$out1,$in7 ++ ++ le?vperm $out0,$out0,$out0,$inpperm ++ le?vperm $out1,$out1,$out1,$inpperm ++ stvx_u $out0,$x00,$out ++ stvx_u $out1,$x10,$out ++ addi $out,$out,0x20 ++ b Lctr32_enc8x_done ++ ++.align 5 ++Lctr32_enc8x_one: ++ vcipherlast $out0,$out0,$in7 ++ ++ le?vperm $out0,$out0,$out0,$inpperm ++ stvx_u $out0,0,$out ++ addi 
$out,$out,0x10 ++ ++Lctr32_enc8x_done: ++ li r10,`$FRAME+15` ++ li r11,`$FRAME+31` ++ stvx $inpperm,r10,$sp # wipe copies of round keys ++ addi r10,r10,32 ++ stvx $inpperm,r11,$sp ++ addi r11,r11,32 ++ stvx $inpperm,r10,$sp ++ addi r10,r10,32 ++ stvx $inpperm,r11,$sp ++ addi r11,r11,32 ++ stvx $inpperm,r10,$sp ++ addi r10,r10,32 ++ stvx $inpperm,r11,$sp ++ addi r11,r11,32 ++ stvx $inpperm,r10,$sp ++ addi r10,r10,32 ++ stvx $inpperm,r11,$sp ++ addi r11,r11,32 ++ ++ mtspr 256,$vrsave ++ lvx v20,r10,$sp # ABI says so ++ addi r10,r10,32 ++ lvx v21,r11,$sp ++ addi r11,r11,32 ++ lvx v22,r10,$sp ++ addi r10,r10,32 ++ lvx v23,r11,$sp ++ addi r11,r11,32 ++ lvx v24,r10,$sp ++ addi r10,r10,32 ++ lvx v25,r11,$sp ++ addi r11,r11,32 ++ lvx v26,r10,$sp ++ addi r10,r10,32 ++ lvx v27,r11,$sp ++ addi r11,r11,32 ++ lvx v28,r10,$sp ++ addi r10,r10,32 ++ lvx v29,r11,$sp ++ addi r11,r11,32 ++ lvx v30,r10,$sp ++ lvx v31,r11,$sp ++ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp) ++ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp) ++ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp) ++ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp) ++ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp) ++ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp) ++ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T` ++ blr ++ .long 0 ++ .byte 0,12,0x04,0,0x80,6,6,0 ++ .long 0 ++.size .${prefix}_ctr32_encrypt_blocks,.-.${prefix}_ctr32_encrypt_blocks ++___ ++}} }}} ++ ++######################################################################### ++{{{ # XTS procedures # ++# int aes_p8_xts_[en|de]crypt(const char *inp, char *out, size_t len, # ++# const AES_KEY *key1, const AES_KEY *key2, # ++# [const] unsigned char iv[16]); # ++# If $key2 is NULL, then a "tweak chaining" mode is engaged, in which # ++# input tweak value is assumed to be encrypted already, and last tweak # ++# value, one suitable for consecutive call on same chunk of data, is # ++# written back to original buffer. In addition, in "tweak chaining" # ++# mode only complete input blocks are processed. 
# ++ ++my ($inp,$out,$len,$key1,$key2,$ivp,$rounds,$idx) = map("r$_",(3..10)); ++my ($rndkey0,$rndkey1,$inout) = map("v$_",(0..2)); ++my ($output,$inptail,$inpperm,$leperm,$keyperm) = map("v$_",(3..7)); ++my ($tweak,$seven,$eighty7,$tmp,$tweak1) = map("v$_",(8..12)); ++my $taillen = $key2; ++ ++ ($inp,$idx) = ($idx,$inp); # reassign ++ ++$code.=<<___; ++.globl .${prefix}_xts_encrypt ++.align 5 ++.${prefix}_xts_encrypt: ++ mr $inp,r3 # reassign ++ li r3,-1 ++ ${UCMP}i $len,16 ++ bltlr- ++ ++ lis r0,0xfff0 ++ mfspr r12,256 # save vrsave ++ li r11,0 ++ mtspr 256,r0 ++ ++ vspltisb $seven,0x07 # 0x070707..07 ++ le?lvsl $leperm,r11,r11 ++ le?vspltisb $tmp,0x0f ++ le?vxor $leperm,$leperm,$seven ++ ++ li $idx,15 ++ lvx $tweak,0,$ivp # load [unaligned] iv ++ lvsl $inpperm,0,$ivp ++ lvx $inptail,$idx,$ivp ++ le?vxor $inpperm,$inpperm,$tmp ++ vperm $tweak,$tweak,$inptail,$inpperm ++ ++ neg r11,$inp ++ lvsr $inpperm,0,r11 # prepare for unaligned load ++ lvx $inout,0,$inp ++ addi $inp,$inp,15 # 15 is not typo ++ le?vxor $inpperm,$inpperm,$tmp ++ ++ ${UCMP}i $key2,0 # key2==NULL? 
++ beq Lxts_enc_no_key2 ++ ++ ?lvsl $keyperm,0,$key2 # prepare for unaligned key ++ lwz $rounds,240($key2) ++ srwi $rounds,$rounds,1 ++ subi $rounds,$rounds,1 ++ li $idx,16 ++ ++ lvx $rndkey0,0,$key2 ++ lvx $rndkey1,$idx,$key2 ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vxor $tweak,$tweak,$rndkey0 ++ lvx $rndkey0,$idx,$key2 ++ addi $idx,$idx,16 ++ mtctr $rounds ++ ++Ltweak_xts_enc: ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vcipher $tweak,$tweak,$rndkey1 ++ lvx $rndkey1,$idx,$key2 ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vcipher $tweak,$tweak,$rndkey0 ++ lvx $rndkey0,$idx,$key2 ++ addi $idx,$idx,16 ++ bdnz Ltweak_xts_enc ++ ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vcipher $tweak,$tweak,$rndkey1 ++ lvx $rndkey1,$idx,$key2 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vcipherlast $tweak,$tweak,$rndkey0 ++ ++ li $ivp,0 # don't chain the tweak ++ b Lxts_enc ++ ++Lxts_enc_no_key2: ++ li $idx,-16 ++ and $len,$len,$idx # in "tweak chaining" ++ # mode only complete ++ # blocks are processed ++Lxts_enc: ++ lvx $inptail,0,$inp ++ addi $inp,$inp,16 ++ ++ ?lvsl $keyperm,0,$key1 # prepare for unaligned key ++ lwz $rounds,240($key1) ++ srwi $rounds,$rounds,1 ++ subi $rounds,$rounds,1 ++ li $idx,16 ++ ++ vslb $eighty7,$seven,$seven # 0x808080..80 ++ vor $eighty7,$eighty7,$seven # 0x878787..87 ++ vspltisb $tmp,1 # 0x010101..01 ++ vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01 ++ ++ ${UCMP}i $len,96 ++ bge _aesp8_xts_encrypt6x ++ ++ andi. 
$taillen,$len,15 ++ subic r0,$len,32 ++ subi $taillen,$taillen,16 ++ subfe r0,r0,r0 ++ and r0,r0,$taillen ++ add $inp,$inp,r0 ++ ++ lvx $rndkey0,0,$key1 ++ lvx $rndkey1,$idx,$key1 ++ addi $idx,$idx,16 ++ vperm $inout,$inout,$inptail,$inpperm ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vxor $inout,$inout,$tweak ++ vxor $inout,$inout,$rndkey0 ++ lvx $rndkey0,$idx,$key1 ++ addi $idx,$idx,16 ++ mtctr $rounds ++ b Loop_xts_enc ++ ++.align 5 ++Loop_xts_enc: ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vcipher $inout,$inout,$rndkey1 ++ lvx $rndkey1,$idx,$key1 ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vcipher $inout,$inout,$rndkey0 ++ lvx $rndkey0,$idx,$key1 ++ addi $idx,$idx,16 ++ bdnz Loop_xts_enc ++ ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vcipher $inout,$inout,$rndkey1 ++ lvx $rndkey1,$idx,$key1 ++ li $idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vxor $rndkey0,$rndkey0,$tweak ++ vcipherlast $output,$inout,$rndkey0 ++ ++ le?vperm $tmp,$output,$output,$leperm ++ be?nop ++ le?stvx_u $tmp,0,$out ++ be?stvx_u $output,0,$out ++ addi $out,$out,16 ++ ++ subic. 
$len,$len,16 ++ beq Lxts_enc_done ++ ++ vmr $inout,$inptail ++ lvx $inptail,0,$inp ++ addi $inp,$inp,16 ++ lvx $rndkey0,0,$key1 ++ lvx $rndkey1,$idx,$key1 ++ addi $idx,$idx,16 ++ ++ subic r0,$len,32 ++ subfe r0,r0,r0 ++ and r0,r0,$taillen ++ add $inp,$inp,r0 ++ ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vand $tmp,$tmp,$eighty7 ++ vxor $tweak,$tweak,$tmp ++ ++ vperm $inout,$inout,$inptail,$inpperm ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vxor $inout,$inout,$tweak ++ vxor $output,$output,$rndkey0 # just in case $len<16 ++ vxor $inout,$inout,$rndkey0 ++ lvx $rndkey0,$idx,$key1 ++ addi $idx,$idx,16 ++ ++ mtctr $rounds ++ ${UCMP}i $len,16 ++ bge Loop_xts_enc ++ ++ vxor $output,$output,$tweak ++ lvsr $inpperm,0,$len # $inpperm is no longer needed ++ vxor $inptail,$inptail,$inptail # $inptail is no longer needed ++ vspltisb $tmp,-1 ++ vperm $inptail,$inptail,$tmp,$inpperm ++ vsel $inout,$inout,$output,$inptail ++ ++ subi r11,$out,17 ++ subi $out,$out,16 ++ mtctr $len ++ li $len,16 ++Loop_xts_enc_steal: ++ lbzu r0,1(r11) ++ stb r0,16(r11) ++ bdnz Loop_xts_enc_steal ++ ++ mtctr $rounds ++ b Loop_xts_enc # one more time... ++ ++Lxts_enc_done: ++ ${UCMP}i $ivp,0 ++ beq Lxts_enc_ret ++ ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vand $tmp,$tmp,$eighty7 ++ vxor $tweak,$tweak,$tmp ++ ++ le?vperm $tweak,$tweak,$tweak,$leperm ++ stvx_u $tweak,0,$ivp ++ ++Lxts_enc_ret: ++ mtspr 256,r12 # restore vrsave ++ li r3,0 ++ blr ++ .long 0 ++ .byte 0,12,0x04,0,0x80,6,6,0 ++ .long 0 ++.size .${prefix}_xts_encrypt,.-.${prefix}_xts_encrypt ++ ++.globl .${prefix}_xts_decrypt ++.align 5 ++.${prefix}_xts_decrypt: ++ mr $inp,r3 # reassign ++ li r3,-1 ++ ${UCMP}i $len,16 ++ bltlr- ++ ++ lis r0,0xfff8 ++ mfspr r12,256 # save vrsave ++ li r11,0 ++ mtspr 256,r0 ++ ++ andi. r0,$len,15 ++ neg r0,r0 ++ andi. 
r0,r0,16 ++ sub $len,$len,r0 ++ ++ vspltisb $seven,0x07 # 0x070707..07 ++ le?lvsl $leperm,r11,r11 ++ le?vspltisb $tmp,0x0f ++ le?vxor $leperm,$leperm,$seven ++ ++ li $idx,15 ++ lvx $tweak,0,$ivp # load [unaligned] iv ++ lvsl $inpperm,0,$ivp ++ lvx $inptail,$idx,$ivp ++ le?vxor $inpperm,$inpperm,$tmp ++ vperm $tweak,$tweak,$inptail,$inpperm ++ ++ neg r11,$inp ++ lvsr $inpperm,0,r11 # prepare for unaligned load ++ lvx $inout,0,$inp ++ addi $inp,$inp,15 # 15 is not typo ++ le?vxor $inpperm,$inpperm,$tmp ++ ++ ${UCMP}i $key2,0 # key2==NULL? ++ beq Lxts_dec_no_key2 ++ ++ ?lvsl $keyperm,0,$key2 # prepare for unaligned key ++ lwz $rounds,240($key2) ++ srwi $rounds,$rounds,1 ++ subi $rounds,$rounds,1 ++ li $idx,16 ++ ++ lvx $rndkey0,0,$key2 ++ lvx $rndkey1,$idx,$key2 ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vxor $tweak,$tweak,$rndkey0 ++ lvx $rndkey0,$idx,$key2 ++ addi $idx,$idx,16 ++ mtctr $rounds ++ ++Ltweak_xts_dec: ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vcipher $tweak,$tweak,$rndkey1 ++ lvx $rndkey1,$idx,$key2 ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vcipher $tweak,$tweak,$rndkey0 ++ lvx $rndkey0,$idx,$key2 ++ addi $idx,$idx,16 ++ bdnz Ltweak_xts_dec ++ ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vcipher $tweak,$tweak,$rndkey1 ++ lvx $rndkey1,$idx,$key2 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vcipherlast $tweak,$tweak,$rndkey0 ++ ++ li $ivp,0 # don't chain the tweak ++ b Lxts_dec ++ ++Lxts_dec_no_key2: ++ neg $idx,$len ++ andi. 
$idx,$idx,15 ++ add $len,$len,$idx # in "tweak chaining" ++ # mode only complete ++ # blocks are processed ++Lxts_dec: ++ lvx $inptail,0,$inp ++ addi $inp,$inp,16 ++ ++ ?lvsl $keyperm,0,$key1 # prepare for unaligned key ++ lwz $rounds,240($key1) ++ srwi $rounds,$rounds,1 ++ subi $rounds,$rounds,1 ++ li $idx,16 ++ ++ vslb $eighty7,$seven,$seven # 0x808080..80 ++ vor $eighty7,$eighty7,$seven # 0x878787..87 ++ vspltisb $tmp,1 # 0x010101..01 ++ vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01 ++ ++ ${UCMP}i $len,96 ++ bge _aesp8_xts_decrypt6x ++ ++ lvx $rndkey0,0,$key1 ++ lvx $rndkey1,$idx,$key1 ++ addi $idx,$idx,16 ++ vperm $inout,$inout,$inptail,$inpperm ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vxor $inout,$inout,$tweak ++ vxor $inout,$inout,$rndkey0 ++ lvx $rndkey0,$idx,$key1 ++ addi $idx,$idx,16 ++ mtctr $rounds ++ ++ ${UCMP}i $len,16 ++ blt Ltail_xts_dec ++ be?b Loop_xts_dec ++ ++.align 5 ++Loop_xts_dec: ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vncipher $inout,$inout,$rndkey1 ++ lvx $rndkey1,$idx,$key1 ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vncipher $inout,$inout,$rndkey0 ++ lvx $rndkey0,$idx,$key1 ++ addi $idx,$idx,16 ++ bdnz Loop_xts_dec ++ ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vncipher $inout,$inout,$rndkey1 ++ lvx $rndkey1,$idx,$key1 ++ li $idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vxor $rndkey0,$rndkey0,$tweak ++ vncipherlast $output,$inout,$rndkey0 ++ ++ le?vperm $tmp,$output,$output,$leperm ++ be?nop ++ le?stvx_u $tmp,0,$out ++ be?stvx_u $output,0,$out ++ addi $out,$out,16 ++ ++ subic. 
$len,$len,16 ++ beq Lxts_dec_done ++ ++ vmr $inout,$inptail ++ lvx $inptail,0,$inp ++ addi $inp,$inp,16 ++ lvx $rndkey0,0,$key1 ++ lvx $rndkey1,$idx,$key1 ++ addi $idx,$idx,16 ++ ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vand $tmp,$tmp,$eighty7 ++ vxor $tweak,$tweak,$tmp ++ ++ vperm $inout,$inout,$inptail,$inpperm ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vxor $inout,$inout,$tweak ++ vxor $inout,$inout,$rndkey0 ++ lvx $rndkey0,$idx,$key1 ++ addi $idx,$idx,16 ++ ++ mtctr $rounds ++ ${UCMP}i $len,16 ++ bge Loop_xts_dec ++ ++Ltail_xts_dec: ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak1,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vand $tmp,$tmp,$eighty7 ++ vxor $tweak1,$tweak1,$tmp ++ ++ subi $inp,$inp,16 ++ add $inp,$inp,$len ++ ++ vxor $inout,$inout,$tweak # :-( ++ vxor $inout,$inout,$tweak1 # :-) ++ ++Loop_xts_dec_short: ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vncipher $inout,$inout,$rndkey1 ++ lvx $rndkey1,$idx,$key1 ++ addi $idx,$idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vncipher $inout,$inout,$rndkey0 ++ lvx $rndkey0,$idx,$key1 ++ addi $idx,$idx,16 ++ bdnz Loop_xts_dec_short ++ ++ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm ++ vncipher $inout,$inout,$rndkey1 ++ lvx $rndkey1,$idx,$key1 ++ li $idx,16 ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ vxor $rndkey0,$rndkey0,$tweak1 ++ vncipherlast $output,$inout,$rndkey0 ++ ++ le?vperm $tmp,$output,$output,$leperm ++ be?nop ++ le?stvx_u $tmp,0,$out ++ be?stvx_u $output,0,$out ++ ++ vmr $inout,$inptail ++ lvx $inptail,0,$inp ++ #addi $inp,$inp,16 ++ lvx $rndkey0,0,$key1 ++ lvx $rndkey1,$idx,$key1 ++ addi $idx,$idx,16 ++ vperm $inout,$inout,$inptail,$inpperm ++ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm ++ ++ lvsr $inpperm,0,$len # $inpperm is no longer needed ++ vxor $inptail,$inptail,$inptail # $inptail is no longer needed ++ vspltisb $tmp,-1 ++ vperm $inptail,$inptail,$tmp,$inpperm ++ vsel 
$inout,$inout,$output,$inptail ++ ++ vxor $rndkey0,$rndkey0,$tweak ++ vxor $inout,$inout,$rndkey0 ++ lvx $rndkey0,$idx,$key1 ++ addi $idx,$idx,16 ++ ++ subi r11,$out,1 ++ mtctr $len ++ li $len,16 ++Loop_xts_dec_steal: ++ lbzu r0,1(r11) ++ stb r0,16(r11) ++ bdnz Loop_xts_dec_steal ++ ++ mtctr $rounds ++ b Loop_xts_dec # one more time... ++ ++Lxts_dec_done: ++ ${UCMP}i $ivp,0 ++ beq Lxts_dec_ret ++ ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vand $tmp,$tmp,$eighty7 ++ vxor $tweak,$tweak,$tmp ++ ++ le?vperm $tweak,$tweak,$tweak,$leperm ++ stvx_u $tweak,0,$ivp ++ ++Lxts_dec_ret: ++ mtspr 256,r12 # restore vrsave ++ li r3,0 ++ blr ++ .long 0 ++ .byte 0,12,0x04,0,0x80,6,6,0 ++ .long 0 ++.size .${prefix}_xts_decrypt,.-.${prefix}_xts_decrypt ++___ ++######################################################################### ++{{ # Optimized XTS procedures # ++my $key_=$key2; ++my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,3,26..31)); ++ $x00=0 if ($flavour =~ /osx/); ++my ($in0, $in1, $in2, $in3, $in4, $in5 )=map("v$_",(0..5)); ++my ($out0, $out1, $out2, $out3, $out4, $out5)=map("v$_",(7,12..16)); ++my ($twk0, $twk1, $twk2, $twk3, $twk4, $twk5)=map("v$_",(17..22)); ++my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys ++ # v26-v31 last 6 round keys ++my ($keyperm)=($out0); # aliases with "caller", redundant assignment ++my $taillen=$x70; ++ ++$code.=<<___; ++.align 5 ++_aesp8_xts_encrypt6x: ++ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp) ++ mflr r11 ++ li r7,`$FRAME+8*16+15` ++ li r3,`$FRAME+8*16+31` ++ $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp) ++ stvx v20,r7,$sp # ABI says so ++ addi r7,r7,32 ++ stvx v21,r3,$sp ++ addi r3,r3,32 ++ stvx v22,r7,$sp ++ addi r7,r7,32 ++ stvx v23,r3,$sp ++ addi r3,r3,32 ++ stvx v24,r7,$sp ++ addi r7,r7,32 ++ stvx v25,r3,$sp ++ addi r3,r3,32 ++ stvx v26,r7,$sp ++ addi r7,r7,32 ++ stvx v27,r3,$sp ++ addi r3,r3,32 ++ stvx v28,r7,$sp ++ addi r7,r7,32 ++ 
stvx v29,r3,$sp ++ addi r3,r3,32 ++ stvx v30,r7,$sp ++ stvx v31,r3,$sp ++ li r0,-1 ++ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave ++ li $x10,0x10 ++ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp) ++ li $x20,0x20 ++ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp) ++ li $x30,0x30 ++ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp) ++ li $x40,0x40 ++ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp) ++ li $x50,0x50 ++ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp) ++ li $x60,0x60 ++ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp) ++ li $x70,0x70 ++ mtspr 256,r0 ++ ++ subi $rounds,$rounds,3 # -4 in total ++ ++ lvx $rndkey0,$x00,$key1 # load key schedule ++ lvx v30,$x10,$key1 ++ addi $key1,$key1,0x20 ++ lvx v31,$x00,$key1 ++ ?vperm $rndkey0,$rndkey0,v30,$keyperm ++ addi $key_,$sp,`$FRAME+15` ++ mtctr $rounds ++ ++Load_xts_enc_key: ++ ?vperm v24,v30,v31,$keyperm ++ lvx v30,$x10,$key1 ++ addi $key1,$key1,0x20 ++ stvx v24,$x00,$key_ # off-load round[1] ++ ?vperm v25,v31,v30,$keyperm ++ lvx v31,$x00,$key1 ++ stvx v25,$x10,$key_ # off-load round[2] ++ addi $key_,$key_,0x20 ++ bdnz Load_xts_enc_key ++ ++ lvx v26,$x10,$key1 ++ ?vperm v24,v30,v31,$keyperm ++ lvx v27,$x20,$key1 ++ stvx v24,$x00,$key_ # off-load round[3] ++ ?vperm v25,v31,v26,$keyperm ++ lvx v28,$x30,$key1 ++ stvx v25,$x10,$key_ # off-load round[4] ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ ?vperm v26,v26,v27,$keyperm ++ lvx v29,$x40,$key1 ++ ?vperm v27,v27,v28,$keyperm ++ lvx v30,$x50,$key1 ++ ?vperm v28,v28,v29,$keyperm ++ lvx v31,$x60,$key1 ++ ?vperm v29,v29,v30,$keyperm ++ lvx $twk5,$x70,$key1 # borrow $twk5 ++ ?vperm v30,v30,v31,$keyperm ++ lvx v24,$x00,$key_ # pre-load round[1] ++ ?vperm v31,v31,$twk5,$keyperm ++ lvx v25,$x10,$key_ # pre-load round[2] ++ ++ vperm $in0,$inout,$inptail,$inpperm ++ subi $inp,$inp,31 # undo "caller" ++ vxor $twk0,$tweak,$rndkey0 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vand $tmp,$tmp,$eighty7 ++ vxor $out0,$in0,$twk0 ++ vxor $tweak,$tweak,$tmp ++ ++ 
lvx_u $in1,$x10,$inp ++ vxor $twk1,$tweak,$rndkey0 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ le?vperm $in1,$in1,$in1,$leperm ++ vand $tmp,$tmp,$eighty7 ++ vxor $out1,$in1,$twk1 ++ vxor $tweak,$tweak,$tmp ++ ++ lvx_u $in2,$x20,$inp ++ andi. $taillen,$len,15 ++ vxor $twk2,$tweak,$rndkey0 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ le?vperm $in2,$in2,$in2,$leperm ++ vand $tmp,$tmp,$eighty7 ++ vxor $out2,$in2,$twk2 ++ vxor $tweak,$tweak,$tmp ++ ++ lvx_u $in3,$x30,$inp ++ sub $len,$len,$taillen ++ vxor $twk3,$tweak,$rndkey0 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ le?vperm $in3,$in3,$in3,$leperm ++ vand $tmp,$tmp,$eighty7 ++ vxor $out3,$in3,$twk3 ++ vxor $tweak,$tweak,$tmp ++ ++ lvx_u $in4,$x40,$inp ++ subi $len,$len,0x60 ++ vxor $twk4,$tweak,$rndkey0 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ le?vperm $in4,$in4,$in4,$leperm ++ vand $tmp,$tmp,$eighty7 ++ vxor $out4,$in4,$twk4 ++ vxor $tweak,$tweak,$tmp ++ ++ lvx_u $in5,$x50,$inp ++ addi $inp,$inp,0x60 ++ vxor $twk5,$tweak,$rndkey0 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ le?vperm $in5,$in5,$in5,$leperm ++ vand $tmp,$tmp,$eighty7 ++ vxor $out5,$in5,$twk5 ++ vxor $tweak,$tweak,$tmp ++ ++ vxor v31,v31,$rndkey0 ++ mtctr $rounds ++ b Loop_xts_enc6x ++ ++.align 5 ++Loop_xts_enc6x: ++ vcipher $out0,$out0,v24 ++ vcipher $out1,$out1,v24 ++ vcipher $out2,$out2,v24 ++ vcipher $out3,$out3,v24 ++ vcipher $out4,$out4,v24 ++ vcipher $out5,$out5,v24 ++ lvx v24,$x20,$key_ # round[3] ++ addi $key_,$key_,0x20 ++ ++ vcipher $out0,$out0,v25 ++ vcipher $out1,$out1,v25 ++ vcipher $out2,$out2,v25 ++ vcipher $out3,$out3,v25 ++ vcipher $out4,$out4,v25 ++ vcipher $out5,$out5,v25 ++ lvx v25,$x10,$key_ # round[4] ++ 
bdnz Loop_xts_enc6x ++ ++ subic $len,$len,96 # $len-=96 ++ vxor $in0,$twk0,v31 # xor with last round key ++ vcipher $out0,$out0,v24 ++ vcipher $out1,$out1,v24 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vxor $twk0,$tweak,$rndkey0 ++ vaddubm $tweak,$tweak,$tweak ++ vcipher $out2,$out2,v24 ++ vcipher $out3,$out3,v24 ++ vsldoi $tmp,$tmp,$tmp,15 ++ vcipher $out4,$out4,v24 ++ vcipher $out5,$out5,v24 ++ ++ subfe. r0,r0,r0 # borrow?-1:0 ++ vand $tmp,$tmp,$eighty7 ++ vcipher $out0,$out0,v25 ++ vcipher $out1,$out1,v25 ++ vxor $tweak,$tweak,$tmp ++ vcipher $out2,$out2,v25 ++ vcipher $out3,$out3,v25 ++ vxor $in1,$twk1,v31 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vxor $twk1,$tweak,$rndkey0 ++ vcipher $out4,$out4,v25 ++ vcipher $out5,$out5,v25 ++ ++ and r0,r0,$len ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vcipher $out0,$out0,v26 ++ vcipher $out1,$out1,v26 ++ vand $tmp,$tmp,$eighty7 ++ vcipher $out2,$out2,v26 ++ vcipher $out3,$out3,v26 ++ vxor $tweak,$tweak,$tmp ++ vcipher $out4,$out4,v26 ++ vcipher $out5,$out5,v26 ++ ++ add $inp,$inp,r0 # $inp is adjusted in such ++ # way that at exit from the ++ # loop inX-in5 are loaded ++ # with last "words" ++ vxor $in2,$twk2,v31 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vxor $twk2,$tweak,$rndkey0 ++ vaddubm $tweak,$tweak,$tweak ++ vcipher $out0,$out0,v27 ++ vcipher $out1,$out1,v27 ++ vsldoi $tmp,$tmp,$tmp,15 ++ vcipher $out2,$out2,v27 ++ vcipher $out3,$out3,v27 ++ vand $tmp,$tmp,$eighty7 ++ vcipher $out4,$out4,v27 ++ vcipher $out5,$out5,v27 ++ ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ vxor $tweak,$tweak,$tmp ++ vcipher $out0,$out0,v28 ++ vcipher $out1,$out1,v28 ++ vxor $in3,$twk3,v31 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vxor $twk3,$tweak,$rndkey0 ++ vcipher $out2,$out2,v28 ++ vcipher $out3,$out3,v28 ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vcipher $out4,$out4,v28 ++ vcipher $out5,$out5,v28 ++ lvx v24,$x00,$key_ # re-pre-load round[1] ++ vand 
$tmp,$tmp,$eighty7 ++ ++ vcipher $out0,$out0,v29 ++ vcipher $out1,$out1,v29 ++ vxor $tweak,$tweak,$tmp ++ vcipher $out2,$out2,v29 ++ vcipher $out3,$out3,v29 ++ vxor $in4,$twk4,v31 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vxor $twk4,$tweak,$rndkey0 ++ vcipher $out4,$out4,v29 ++ vcipher $out5,$out5,v29 ++ lvx v25,$x10,$key_ # re-pre-load round[2] ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ ++ vcipher $out0,$out0,v30 ++ vcipher $out1,$out1,v30 ++ vand $tmp,$tmp,$eighty7 ++ vcipher $out2,$out2,v30 ++ vcipher $out3,$out3,v30 ++ vxor $tweak,$tweak,$tmp ++ vcipher $out4,$out4,v30 ++ vcipher $out5,$out5,v30 ++ vxor $in5,$twk5,v31 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vxor $twk5,$tweak,$rndkey0 ++ ++ vcipherlast $out0,$out0,$in0 ++ lvx_u $in0,$x00,$inp # load next input block ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vcipherlast $out1,$out1,$in1 ++ lvx_u $in1,$x10,$inp ++ vcipherlast $out2,$out2,$in2 ++ le?vperm $in0,$in0,$in0,$leperm ++ lvx_u $in2,$x20,$inp ++ vand $tmp,$tmp,$eighty7 ++ vcipherlast $out3,$out3,$in3 ++ le?vperm $in1,$in1,$in1,$leperm ++ lvx_u $in3,$x30,$inp ++ vcipherlast $out4,$out4,$in4 ++ le?vperm $in2,$in2,$in2,$leperm ++ lvx_u $in4,$x40,$inp ++ vxor $tweak,$tweak,$tmp ++ vcipherlast $tmp,$out5,$in5 # last block might be needed ++ # in stealing mode ++ le?vperm $in3,$in3,$in3,$leperm ++ lvx_u $in5,$x50,$inp ++ addi $inp,$inp,0x60 ++ le?vperm $in4,$in4,$in4,$leperm ++ le?vperm $in5,$in5,$in5,$leperm ++ ++ le?vperm $out0,$out0,$out0,$leperm ++ le?vperm $out1,$out1,$out1,$leperm ++ stvx_u $out0,$x00,$out # store output ++ vxor $out0,$in0,$twk0 ++ le?vperm $out2,$out2,$out2,$leperm ++ stvx_u $out1,$x10,$out ++ vxor $out1,$in1,$twk1 ++ le?vperm $out3,$out3,$out3,$leperm ++ stvx_u $out2,$x20,$out ++ vxor $out2,$in2,$twk2 ++ le?vperm $out4,$out4,$out4,$leperm ++ stvx_u $out3,$x30,$out ++ vxor $out3,$in3,$twk3 ++ le?vperm $out5,$tmp,$tmp,$leperm ++ stvx_u $out4,$x40,$out ++ vxor $out4,$in4,$twk4 ++ 
le?stvx_u $out5,$x50,$out ++ be?stvx_u $tmp, $x50,$out ++ vxor $out5,$in5,$twk5 ++ addi $out,$out,0x60 ++ ++ mtctr $rounds ++ beq Loop_xts_enc6x # did $len-=96 borrow? ++ ++ addic. $len,$len,0x60 ++ beq Lxts_enc6x_zero ++ cmpwi $len,0x20 ++ blt Lxts_enc6x_one ++ nop ++ beq Lxts_enc6x_two ++ cmpwi $len,0x40 ++ blt Lxts_enc6x_three ++ nop ++ beq Lxts_enc6x_four ++ ++Lxts_enc6x_five: ++ vxor $out0,$in1,$twk0 ++ vxor $out1,$in2,$twk1 ++ vxor $out2,$in3,$twk2 ++ vxor $out3,$in4,$twk3 ++ vxor $out4,$in5,$twk4 ++ ++ bl _aesp8_xts_enc5x ++ ++ le?vperm $out0,$out0,$out0,$leperm ++ vmr $twk0,$twk5 # unused tweak ++ le?vperm $out1,$out1,$out1,$leperm ++ stvx_u $out0,$x00,$out # store output ++ le?vperm $out2,$out2,$out2,$leperm ++ stvx_u $out1,$x10,$out ++ le?vperm $out3,$out3,$out3,$leperm ++ stvx_u $out2,$x20,$out ++ vxor $tmp,$out4,$twk5 # last block prep for stealing ++ le?vperm $out4,$out4,$out4,$leperm ++ stvx_u $out3,$x30,$out ++ stvx_u $out4,$x40,$out ++ addi $out,$out,0x50 ++ bne Lxts_enc6x_steal ++ b Lxts_enc6x_done ++ ++.align 4 ++Lxts_enc6x_four: ++ vxor $out0,$in2,$twk0 ++ vxor $out1,$in3,$twk1 ++ vxor $out2,$in4,$twk2 ++ vxor $out3,$in5,$twk3 ++ vxor $out4,$out4,$out4 ++ ++ bl _aesp8_xts_enc5x ++ ++ le?vperm $out0,$out0,$out0,$leperm ++ vmr $twk0,$twk4 # unused tweak ++ le?vperm $out1,$out1,$out1,$leperm ++ stvx_u $out0,$x00,$out # store output ++ le?vperm $out2,$out2,$out2,$leperm ++ stvx_u $out1,$x10,$out ++ vxor $tmp,$out3,$twk4 # last block prep for stealing ++ le?vperm $out3,$out3,$out3,$leperm ++ stvx_u $out2,$x20,$out ++ stvx_u $out3,$x30,$out ++ addi $out,$out,0x40 ++ bne Lxts_enc6x_steal ++ b Lxts_enc6x_done ++ ++.align 4 ++Lxts_enc6x_three: ++ vxor $out0,$in3,$twk0 ++ vxor $out1,$in4,$twk1 ++ vxor $out2,$in5,$twk2 ++ vxor $out3,$out3,$out3 ++ vxor $out4,$out4,$out4 ++ ++ bl _aesp8_xts_enc5x ++ ++ le?vperm $out0,$out0,$out0,$leperm ++ vmr $twk0,$twk3 # unused tweak ++ le?vperm $out1,$out1,$out1,$leperm ++ stvx_u $out0,$x00,$out # store output ++ vxor 
$tmp,$out2,$twk3 # last block prep for stealing ++ le?vperm $out2,$out2,$out2,$leperm ++ stvx_u $out1,$x10,$out ++ stvx_u $out2,$x20,$out ++ addi $out,$out,0x30 ++ bne Lxts_enc6x_steal ++ b Lxts_enc6x_done ++ ++.align 4 ++Lxts_enc6x_two: ++ vxor $out0,$in4,$twk0 ++ vxor $out1,$in5,$twk1 ++ vxor $out2,$out2,$out2 ++ vxor $out3,$out3,$out3 ++ vxor $out4,$out4,$out4 ++ ++ bl _aesp8_xts_enc5x ++ ++ le?vperm $out0,$out0,$out0,$leperm ++ vmr $twk0,$twk2 # unused tweak ++ vxor $tmp,$out1,$twk2 # last block prep for stealing ++ le?vperm $out1,$out1,$out1,$leperm ++ stvx_u $out0,$x00,$out # store output ++ stvx_u $out1,$x10,$out ++ addi $out,$out,0x20 ++ bne Lxts_enc6x_steal ++ b Lxts_enc6x_done ++ ++.align 4 ++Lxts_enc6x_one: ++ vxor $out0,$in5,$twk0 ++ nop ++Loop_xts_enc1x: ++ vcipher $out0,$out0,v24 ++ lvx v24,$x20,$key_ # round[3] ++ addi $key_,$key_,0x20 ++ ++ vcipher $out0,$out0,v25 ++ lvx v25,$x10,$key_ # round[4] ++ bdnz Loop_xts_enc1x ++ ++ add $inp,$inp,$taillen ++ cmpwi $taillen,0 ++ vcipher $out0,$out0,v24 ++ ++ subi $inp,$inp,16 ++ vcipher $out0,$out0,v25 ++ ++ lvsr $inpperm,0,$taillen ++ vcipher $out0,$out0,v26 ++ ++ lvx_u $in0,0,$inp ++ vcipher $out0,$out0,v27 ++ ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ vcipher $out0,$out0,v28 ++ lvx v24,$x00,$key_ # re-pre-load round[1] ++ ++ vcipher $out0,$out0,v29 ++ lvx v25,$x10,$key_ # re-pre-load round[2] ++ vxor $twk0,$twk0,v31 ++ ++ le?vperm $in0,$in0,$in0,$leperm ++ vcipher $out0,$out0,v30 ++ ++ vperm $in0,$in0,$in0,$inpperm ++ vcipherlast $out0,$out0,$twk0 ++ ++ vmr $twk0,$twk1 # unused tweak ++ vxor $tmp,$out0,$twk1 # last block prep for stealing ++ le?vperm $out0,$out0,$out0,$leperm ++ stvx_u $out0,$x00,$out # store output ++ addi $out,$out,0x10 ++ bne Lxts_enc6x_steal ++ b Lxts_enc6x_done ++ ++.align 4 ++Lxts_enc6x_zero: ++ cmpwi $taillen,0 ++ beq Lxts_enc6x_done ++ ++ add $inp,$inp,$taillen ++ subi $inp,$inp,16 ++ lvx_u $in0,0,$inp ++ lvsr $inpperm,0,$taillen # $in5 is no more ++ le?vperm 
$in0,$in0,$in0,$leperm ++ vperm $in0,$in0,$in0,$inpperm ++ vxor $tmp,$tmp,$twk0 ++Lxts_enc6x_steal: ++ vxor $in0,$in0,$twk0 ++ vxor $out0,$out0,$out0 ++ vspltisb $out1,-1 ++ vperm $out0,$out0,$out1,$inpperm ++ vsel $out0,$in0,$tmp,$out0 # $tmp is last block, remember? ++ ++ subi r30,$out,17 ++ subi $out,$out,16 ++ mtctr $taillen ++Loop_xts_enc6x_steal: ++ lbzu r0,1(r30) ++ stb r0,16(r30) ++ bdnz Loop_xts_enc6x_steal ++ ++ li $taillen,0 ++ mtctr $rounds ++ b Loop_xts_enc1x # one more time... ++ ++.align 4 ++Lxts_enc6x_done: ++ ${UCMP}i $ivp,0 ++ beq Lxts_enc6x_ret ++ ++ vxor $tweak,$twk0,$rndkey0 ++ le?vperm $tweak,$tweak,$tweak,$leperm ++ stvx_u $tweak,0,$ivp ++ ++Lxts_enc6x_ret: ++ mtlr r11 ++ li r10,`$FRAME+15` ++ li r11,`$FRAME+31` ++ stvx $seven,r10,$sp # wipe copies of round keys ++ addi r10,r10,32 ++ stvx $seven,r11,$sp ++ addi r11,r11,32 ++ stvx $seven,r10,$sp ++ addi r10,r10,32 ++ stvx $seven,r11,$sp ++ addi r11,r11,32 ++ stvx $seven,r10,$sp ++ addi r10,r10,32 ++ stvx $seven,r11,$sp ++ addi r11,r11,32 ++ stvx $seven,r10,$sp ++ addi r10,r10,32 ++ stvx $seven,r11,$sp ++ addi r11,r11,32 ++ ++ mtspr 256,$vrsave ++ lvx v20,r10,$sp # ABI says so ++ addi r10,r10,32 ++ lvx v21,r11,$sp ++ addi r11,r11,32 ++ lvx v22,r10,$sp ++ addi r10,r10,32 ++ lvx v23,r11,$sp ++ addi r11,r11,32 ++ lvx v24,r10,$sp ++ addi r10,r10,32 ++ lvx v25,r11,$sp ++ addi r11,r11,32 ++ lvx v26,r10,$sp ++ addi r10,r10,32 ++ lvx v27,r11,$sp ++ addi r11,r11,32 ++ lvx v28,r10,$sp ++ addi r10,r10,32 ++ lvx v29,r11,$sp ++ addi r11,r11,32 ++ lvx v30,r10,$sp ++ lvx v31,r11,$sp ++ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp) ++ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp) ++ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp) ++ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp) ++ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp) ++ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp) ++ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T` ++ blr ++ .long 0 ++ .byte 0,12,0x04,1,0x80,6,6,0 ++ .long 0 ++ ++.align 5 ++_aesp8_xts_enc5x: ++ vcipher $out0,$out0,v24 ++ vcipher 
$out1,$out1,v24 ++ vcipher $out2,$out2,v24 ++ vcipher $out3,$out3,v24 ++ vcipher $out4,$out4,v24 ++ lvx v24,$x20,$key_ # round[3] ++ addi $key_,$key_,0x20 ++ ++ vcipher $out0,$out0,v25 ++ vcipher $out1,$out1,v25 ++ vcipher $out2,$out2,v25 ++ vcipher $out3,$out3,v25 ++ vcipher $out4,$out4,v25 ++ lvx v25,$x10,$key_ # round[4] ++ bdnz _aesp8_xts_enc5x ++ ++ add $inp,$inp,$taillen ++ cmpwi $taillen,0 ++ vcipher $out0,$out0,v24 ++ vcipher $out1,$out1,v24 ++ vcipher $out2,$out2,v24 ++ vcipher $out3,$out3,v24 ++ vcipher $out4,$out4,v24 ++ ++ subi $inp,$inp,16 ++ vcipher $out0,$out0,v25 ++ vcipher $out1,$out1,v25 ++ vcipher $out2,$out2,v25 ++ vcipher $out3,$out3,v25 ++ vcipher $out4,$out4,v25 ++ vxor $twk0,$twk0,v31 ++ ++ vcipher $out0,$out0,v26 ++ lvsr $inpperm,0,$taillen # $in5 is no more ++ vcipher $out1,$out1,v26 ++ vcipher $out2,$out2,v26 ++ vcipher $out3,$out3,v26 ++ vcipher $out4,$out4,v26 ++ vxor $in1,$twk1,v31 ++ ++ vcipher $out0,$out0,v27 ++ lvx_u $in0,0,$inp ++ vcipher $out1,$out1,v27 ++ vcipher $out2,$out2,v27 ++ vcipher $out3,$out3,v27 ++ vcipher $out4,$out4,v27 ++ vxor $in2,$twk2,v31 ++ ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ vcipher $out0,$out0,v28 ++ vcipher $out1,$out1,v28 ++ vcipher $out2,$out2,v28 ++ vcipher $out3,$out3,v28 ++ vcipher $out4,$out4,v28 ++ lvx v24,$x00,$key_ # re-pre-load round[1] ++ vxor $in3,$twk3,v31 ++ ++ vcipher $out0,$out0,v29 ++ le?vperm $in0,$in0,$in0,$leperm ++ vcipher $out1,$out1,v29 ++ vcipher $out2,$out2,v29 ++ vcipher $out3,$out3,v29 ++ vcipher $out4,$out4,v29 ++ lvx v25,$x10,$key_ # re-pre-load round[2] ++ vxor $in4,$twk4,v31 ++ ++ vcipher $out0,$out0,v30 ++ vperm $in0,$in0,$in0,$inpperm ++ vcipher $out1,$out1,v30 ++ vcipher $out2,$out2,v30 ++ vcipher $out3,$out3,v30 ++ vcipher $out4,$out4,v30 ++ ++ vcipherlast $out0,$out0,$twk0 ++ vcipherlast $out1,$out1,$in1 ++ vcipherlast $out2,$out2,$in2 ++ vcipherlast $out3,$out3,$in3 ++ vcipherlast $out4,$out4,$in4 ++ blr ++ .long 0 ++ .byte 0,12,0x14,0,0,0,0,0 ++ ++.align 5 
++_aesp8_xts_decrypt6x: ++ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp) ++ mflr r11 ++ li r7,`$FRAME+8*16+15` ++ li r3,`$FRAME+8*16+31` ++ $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp) ++ stvx v20,r7,$sp # ABI says so ++ addi r7,r7,32 ++ stvx v21,r3,$sp ++ addi r3,r3,32 ++ stvx v22,r7,$sp ++ addi r7,r7,32 ++ stvx v23,r3,$sp ++ addi r3,r3,32 ++ stvx v24,r7,$sp ++ addi r7,r7,32 ++ stvx v25,r3,$sp ++ addi r3,r3,32 ++ stvx v26,r7,$sp ++ addi r7,r7,32 ++ stvx v27,r3,$sp ++ addi r3,r3,32 ++ stvx v28,r7,$sp ++ addi r7,r7,32 ++ stvx v29,r3,$sp ++ addi r3,r3,32 ++ stvx v30,r7,$sp ++ stvx v31,r3,$sp ++ li r0,-1 ++ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave ++ li $x10,0x10 ++ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp) ++ li $x20,0x20 ++ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp) ++ li $x30,0x30 ++ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp) ++ li $x40,0x40 ++ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp) ++ li $x50,0x50 ++ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp) ++ li $x60,0x60 ++ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp) ++ li $x70,0x70 ++ mtspr 256,r0 ++ ++ subi $rounds,$rounds,3 # -4 in total ++ ++ lvx $rndkey0,$x00,$key1 # load key schedule ++ lvx v30,$x10,$key1 ++ addi $key1,$key1,0x20 ++ lvx v31,$x00,$key1 ++ ?vperm $rndkey0,$rndkey0,v30,$keyperm ++ addi $key_,$sp,`$FRAME+15` ++ mtctr $rounds ++ ++Load_xts_dec_key: ++ ?vperm v24,v30,v31,$keyperm ++ lvx v30,$x10,$key1 ++ addi $key1,$key1,0x20 ++ stvx v24,$x00,$key_ # off-load round[1] ++ ?vperm v25,v31,v30,$keyperm ++ lvx v31,$x00,$key1 ++ stvx v25,$x10,$key_ # off-load round[2] ++ addi $key_,$key_,0x20 ++ bdnz Load_xts_dec_key ++ ++ lvx v26,$x10,$key1 ++ ?vperm v24,v30,v31,$keyperm ++ lvx v27,$x20,$key1 ++ stvx v24,$x00,$key_ # off-load round[3] ++ ?vperm v25,v31,v26,$keyperm ++ lvx v28,$x30,$key1 ++ stvx v25,$x10,$key_ # off-load round[4] ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ ?vperm v26,v26,v27,$keyperm ++ lvx v29,$x40,$key1 ++ ?vperm v27,v27,v28,$keyperm ++ lvx v30,$x50,$key1 ++ ?vperm v28,v28,v29,$keyperm ++ lvx 
v31,$x60,$key1 ++ ?vperm v29,v29,v30,$keyperm ++ lvx $twk5,$x70,$key1 # borrow $twk5 ++ ?vperm v30,v30,v31,$keyperm ++ lvx v24,$x00,$key_ # pre-load round[1] ++ ?vperm v31,v31,$twk5,$keyperm ++ lvx v25,$x10,$key_ # pre-load round[2] ++ ++ vperm $in0,$inout,$inptail,$inpperm ++ subi $inp,$inp,31 # undo "caller" ++ vxor $twk0,$tweak,$rndkey0 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vand $tmp,$tmp,$eighty7 ++ vxor $out0,$in0,$twk0 ++ vxor $tweak,$tweak,$tmp ++ ++ lvx_u $in1,$x10,$inp ++ vxor $twk1,$tweak,$rndkey0 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ le?vperm $in1,$in1,$in1,$leperm ++ vand $tmp,$tmp,$eighty7 ++ vxor $out1,$in1,$twk1 ++ vxor $tweak,$tweak,$tmp ++ ++ lvx_u $in2,$x20,$inp ++ andi. $taillen,$len,15 ++ vxor $twk2,$tweak,$rndkey0 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ le?vperm $in2,$in2,$in2,$leperm ++ vand $tmp,$tmp,$eighty7 ++ vxor $out2,$in2,$twk2 ++ vxor $tweak,$tweak,$tmp ++ ++ lvx_u $in3,$x30,$inp ++ sub $len,$len,$taillen ++ vxor $twk3,$tweak,$rndkey0 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ le?vperm $in3,$in3,$in3,$leperm ++ vand $tmp,$tmp,$eighty7 ++ vxor $out3,$in3,$twk3 ++ vxor $tweak,$tweak,$tmp ++ ++ lvx_u $in4,$x40,$inp ++ subi $len,$len,0x60 ++ vxor $twk4,$tweak,$rndkey0 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ le?vperm $in4,$in4,$in4,$leperm ++ vand $tmp,$tmp,$eighty7 ++ vxor $out4,$in4,$twk4 ++ vxor $tweak,$tweak,$tmp ++ ++ lvx_u $in5,$x50,$inp ++ addi $inp,$inp,0x60 ++ vxor $twk5,$tweak,$rndkey0 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ le?vperm $in5,$in5,$in5,$leperm ++ vand $tmp,$tmp,$eighty7 ++ vxor $out5,$in5,$twk5 ++ vxor 
$tweak,$tweak,$tmp ++ ++ vxor v31,v31,$rndkey0 ++ mtctr $rounds ++ b Loop_xts_dec6x ++ ++.align 5 ++Loop_xts_dec6x: ++ vncipher $out0,$out0,v24 ++ vncipher $out1,$out1,v24 ++ vncipher $out2,$out2,v24 ++ vncipher $out3,$out3,v24 ++ vncipher $out4,$out4,v24 ++ vncipher $out5,$out5,v24 ++ lvx v24,$x20,$key_ # round[3] ++ addi $key_,$key_,0x20 ++ ++ vncipher $out0,$out0,v25 ++ vncipher $out1,$out1,v25 ++ vncipher $out2,$out2,v25 ++ vncipher $out3,$out3,v25 ++ vncipher $out4,$out4,v25 ++ vncipher $out5,$out5,v25 ++ lvx v25,$x10,$key_ # round[4] ++ bdnz Loop_xts_dec6x ++ ++ subic $len,$len,96 # $len-=96 ++ vxor $in0,$twk0,v31 # xor with last round key ++ vncipher $out0,$out0,v24 ++ vncipher $out1,$out1,v24 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vxor $twk0,$tweak,$rndkey0 ++ vaddubm $tweak,$tweak,$tweak ++ vncipher $out2,$out2,v24 ++ vncipher $out3,$out3,v24 ++ vsldoi $tmp,$tmp,$tmp,15 ++ vncipher $out4,$out4,v24 ++ vncipher $out5,$out5,v24 ++ ++ subfe. r0,r0,r0 # borrow?-1:0 ++ vand $tmp,$tmp,$eighty7 ++ vncipher $out0,$out0,v25 ++ vncipher $out1,$out1,v25 ++ vxor $tweak,$tweak,$tmp ++ vncipher $out2,$out2,v25 ++ vncipher $out3,$out3,v25 ++ vxor $in1,$twk1,v31 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vxor $twk1,$tweak,$rndkey0 ++ vncipher $out4,$out4,v25 ++ vncipher $out5,$out5,v25 ++ ++ and r0,r0,$len ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vncipher $out0,$out0,v26 ++ vncipher $out1,$out1,v26 ++ vand $tmp,$tmp,$eighty7 ++ vncipher $out2,$out2,v26 ++ vncipher $out3,$out3,v26 ++ vxor $tweak,$tweak,$tmp ++ vncipher $out4,$out4,v26 ++ vncipher $out5,$out5,v26 ++ ++ add $inp,$inp,r0 # $inp is adjusted in such ++ # way that at exit from the ++ # loop inX-in5 are loaded ++ # with last "words" ++ vxor $in2,$twk2,v31 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vxor $twk2,$tweak,$rndkey0 ++ vaddubm $tweak,$tweak,$tweak ++ vncipher $out0,$out0,v27 ++ vncipher $out1,$out1,v27 ++ vsldoi $tmp,$tmp,$tmp,15 ++ vncipher $out2,$out2,v27 ++ 
vncipher $out3,$out3,v27 ++ vand $tmp,$tmp,$eighty7 ++ vncipher $out4,$out4,v27 ++ vncipher $out5,$out5,v27 ++ ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ vxor $tweak,$tweak,$tmp ++ vncipher $out0,$out0,v28 ++ vncipher $out1,$out1,v28 ++ vxor $in3,$twk3,v31 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vxor $twk3,$tweak,$rndkey0 ++ vncipher $out2,$out2,v28 ++ vncipher $out3,$out3,v28 ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vncipher $out4,$out4,v28 ++ vncipher $out5,$out5,v28 ++ lvx v24,$x00,$key_ # re-pre-load round[1] ++ vand $tmp,$tmp,$eighty7 ++ ++ vncipher $out0,$out0,v29 ++ vncipher $out1,$out1,v29 ++ vxor $tweak,$tweak,$tmp ++ vncipher $out2,$out2,v29 ++ vncipher $out3,$out3,v29 ++ vxor $in4,$twk4,v31 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vxor $twk4,$tweak,$rndkey0 ++ vncipher $out4,$out4,v29 ++ vncipher $out5,$out5,v29 ++ lvx v25,$x10,$key_ # re-pre-load round[2] ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ ++ vncipher $out0,$out0,v30 ++ vncipher $out1,$out1,v30 ++ vand $tmp,$tmp,$eighty7 ++ vncipher $out2,$out2,v30 ++ vncipher $out3,$out3,v30 ++ vxor $tweak,$tweak,$tmp ++ vncipher $out4,$out4,v30 ++ vncipher $out5,$out5,v30 ++ vxor $in5,$twk5,v31 ++ vsrab $tmp,$tweak,$seven # next tweak value ++ vxor $twk5,$tweak,$rndkey0 ++ ++ vncipherlast $out0,$out0,$in0 ++ lvx_u $in0,$x00,$inp # load next input block ++ vaddubm $tweak,$tweak,$tweak ++ vsldoi $tmp,$tmp,$tmp,15 ++ vncipherlast $out1,$out1,$in1 ++ lvx_u $in1,$x10,$inp ++ vncipherlast $out2,$out2,$in2 ++ le?vperm $in0,$in0,$in0,$leperm ++ lvx_u $in2,$x20,$inp ++ vand $tmp,$tmp,$eighty7 ++ vncipherlast $out3,$out3,$in3 ++ le?vperm $in1,$in1,$in1,$leperm ++ lvx_u $in3,$x30,$inp ++ vncipherlast $out4,$out4,$in4 ++ le?vperm $in2,$in2,$in2,$leperm ++ lvx_u $in4,$x40,$inp ++ vxor $tweak,$tweak,$tmp ++ vncipherlast $out5,$out5,$in5 ++ le?vperm $in3,$in3,$in3,$leperm ++ lvx_u $in5,$x50,$inp ++ addi $inp,$inp,0x60 ++ le?vperm $in4,$in4,$in4,$leperm ++ 
le?vperm $in5,$in5,$in5,$leperm ++ ++ le?vperm $out0,$out0,$out0,$leperm ++ le?vperm $out1,$out1,$out1,$leperm ++ stvx_u $out0,$x00,$out # store output ++ vxor $out0,$in0,$twk0 ++ le?vperm $out2,$out2,$out2,$leperm ++ stvx_u $out1,$x10,$out ++ vxor $out1,$in1,$twk1 ++ le?vperm $out3,$out3,$out3,$leperm ++ stvx_u $out2,$x20,$out ++ vxor $out2,$in2,$twk2 ++ le?vperm $out4,$out4,$out4,$leperm ++ stvx_u $out3,$x30,$out ++ vxor $out3,$in3,$twk3 ++ le?vperm $out5,$out5,$out5,$leperm ++ stvx_u $out4,$x40,$out ++ vxor $out4,$in4,$twk4 ++ stvx_u $out5,$x50,$out ++ vxor $out5,$in5,$twk5 ++ addi $out,$out,0x60 ++ ++ mtctr $rounds ++ beq Loop_xts_dec6x # did $len-=96 borrow? ++ ++ addic. $len,$len,0x60 ++ beq Lxts_dec6x_zero ++ cmpwi $len,0x20 ++ blt Lxts_dec6x_one ++ nop ++ beq Lxts_dec6x_two ++ cmpwi $len,0x40 ++ blt Lxts_dec6x_three ++ nop ++ beq Lxts_dec6x_four ++ ++Lxts_dec6x_five: ++ vxor $out0,$in1,$twk0 ++ vxor $out1,$in2,$twk1 ++ vxor $out2,$in3,$twk2 ++ vxor $out3,$in4,$twk3 ++ vxor $out4,$in5,$twk4 ++ ++ bl _aesp8_xts_dec5x ++ ++ le?vperm $out0,$out0,$out0,$leperm ++ vmr $twk0,$twk5 # unused tweak ++ vxor $twk1,$tweak,$rndkey0 ++ le?vperm $out1,$out1,$out1,$leperm ++ stvx_u $out0,$x00,$out # store output ++ vxor $out0,$in0,$twk1 ++ le?vperm $out2,$out2,$out2,$leperm ++ stvx_u $out1,$x10,$out ++ le?vperm $out3,$out3,$out3,$leperm ++ stvx_u $out2,$x20,$out ++ le?vperm $out4,$out4,$out4,$leperm ++ stvx_u $out3,$x30,$out ++ stvx_u $out4,$x40,$out ++ addi $out,$out,0x50 ++ bne Lxts_dec6x_steal ++ b Lxts_dec6x_done ++ ++.align 4 ++Lxts_dec6x_four: ++ vxor $out0,$in2,$twk0 ++ vxor $out1,$in3,$twk1 ++ vxor $out2,$in4,$twk2 ++ vxor $out3,$in5,$twk3 ++ vxor $out4,$out4,$out4 ++ ++ bl _aesp8_xts_dec5x ++ ++ le?vperm $out0,$out0,$out0,$leperm ++ vmr $twk0,$twk4 # unused tweak ++ vmr $twk1,$twk5 ++ le?vperm $out1,$out1,$out1,$leperm ++ stvx_u $out0,$x00,$out # store output ++ vxor $out0,$in0,$twk5 ++ le?vperm $out2,$out2,$out2,$leperm ++ stvx_u $out1,$x10,$out ++ le?vperm 
$out3,$out3,$out3,$leperm ++ stvx_u $out2,$x20,$out ++ stvx_u $out3,$x30,$out ++ addi $out,$out,0x40 ++ bne Lxts_dec6x_steal ++ b Lxts_dec6x_done ++ ++.align 4 ++Lxts_dec6x_three: ++ vxor $out0,$in3,$twk0 ++ vxor $out1,$in4,$twk1 ++ vxor $out2,$in5,$twk2 ++ vxor $out3,$out3,$out3 ++ vxor $out4,$out4,$out4 ++ ++ bl _aesp8_xts_dec5x ++ ++ le?vperm $out0,$out0,$out0,$leperm ++ vmr $twk0,$twk3 # unused tweak ++ vmr $twk1,$twk4 ++ le?vperm $out1,$out1,$out1,$leperm ++ stvx_u $out0,$x00,$out # store output ++ vxor $out0,$in0,$twk4 ++ le?vperm $out2,$out2,$out2,$leperm ++ stvx_u $out1,$x10,$out ++ stvx_u $out2,$x20,$out ++ addi $out,$out,0x30 ++ bne Lxts_dec6x_steal ++ b Lxts_dec6x_done ++ ++.align 4 ++Lxts_dec6x_two: ++ vxor $out0,$in4,$twk0 ++ vxor $out1,$in5,$twk1 ++ vxor $out2,$out2,$out2 ++ vxor $out3,$out3,$out3 ++ vxor $out4,$out4,$out4 ++ ++ bl _aesp8_xts_dec5x ++ ++ le?vperm $out0,$out0,$out0,$leperm ++ vmr $twk0,$twk2 # unused tweak ++ vmr $twk1,$twk3 ++ le?vperm $out1,$out1,$out1,$leperm ++ stvx_u $out0,$x00,$out # store output ++ vxor $out0,$in0,$twk3 ++ stvx_u $out1,$x10,$out ++ addi $out,$out,0x20 ++ bne Lxts_dec6x_steal ++ b Lxts_dec6x_done ++ ++.align 4 ++Lxts_dec6x_one: ++ vxor $out0,$in5,$twk0 ++ nop ++Loop_xts_dec1x: ++ vncipher $out0,$out0,v24 ++ lvx v24,$x20,$key_ # round[3] ++ addi $key_,$key_,0x20 ++ ++ vncipher $out0,$out0,v25 ++ lvx v25,$x10,$key_ # round[4] ++ bdnz Loop_xts_dec1x ++ ++ subi r0,$taillen,1 ++ vncipher $out0,$out0,v24 ++ ++ andi. 
r0,r0,16 ++ cmpwi $taillen,0 ++ vncipher $out0,$out0,v25 ++ ++ sub $inp,$inp,r0 ++ vncipher $out0,$out0,v26 ++ ++ lvx_u $in0,0,$inp ++ vncipher $out0,$out0,v27 ++ ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ vncipher $out0,$out0,v28 ++ lvx v24,$x00,$key_ # re-pre-load round[1] ++ ++ vncipher $out0,$out0,v29 ++ lvx v25,$x10,$key_ # re-pre-load round[2] ++ vxor $twk0,$twk0,v31 ++ ++ le?vperm $in0,$in0,$in0,$leperm ++ vncipher $out0,$out0,v30 ++ ++ mtctr $rounds ++ vncipherlast $out0,$out0,$twk0 ++ ++ vmr $twk0,$twk1 # unused tweak ++ vmr $twk1,$twk2 ++ le?vperm $out0,$out0,$out0,$leperm ++ stvx_u $out0,$x00,$out # store output ++ addi $out,$out,0x10 ++ vxor $out0,$in0,$twk2 ++ bne Lxts_dec6x_steal ++ b Lxts_dec6x_done ++ ++.align 4 ++Lxts_dec6x_zero: ++ cmpwi $taillen,0 ++ beq Lxts_dec6x_done ++ ++ lvx_u $in0,0,$inp ++ le?vperm $in0,$in0,$in0,$leperm ++ vxor $out0,$in0,$twk1 ++Lxts_dec6x_steal: ++ vncipher $out0,$out0,v24 ++ lvx v24,$x20,$key_ # round[3] ++ addi $key_,$key_,0x20 ++ ++ vncipher $out0,$out0,v25 ++ lvx v25,$x10,$key_ # round[4] ++ bdnz Lxts_dec6x_steal ++ ++ add $inp,$inp,$taillen ++ vncipher $out0,$out0,v24 ++ ++ cmpwi $taillen,0 ++ vncipher $out0,$out0,v25 ++ ++ lvx_u $in0,0,$inp ++ vncipher $out0,$out0,v26 ++ ++ lvsr $inpperm,0,$taillen # $in5 is no more ++ vncipher $out0,$out0,v27 ++ ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ vncipher $out0,$out0,v28 ++ lvx v24,$x00,$key_ # re-pre-load round[1] ++ ++ vncipher $out0,$out0,v29 ++ lvx v25,$x10,$key_ # re-pre-load round[2] ++ vxor $twk1,$twk1,v31 ++ ++ le?vperm $in0,$in0,$in0,$leperm ++ vncipher $out0,$out0,v30 ++ ++ vperm $in0,$in0,$in0,$inpperm ++ vncipherlast $tmp,$out0,$twk1 ++ ++ le?vperm $out0,$tmp,$tmp,$leperm ++ le?stvx_u $out0,0,$out ++ be?stvx_u $tmp,0,$out ++ ++ vxor $out0,$out0,$out0 ++ vspltisb $out1,-1 ++ vperm $out0,$out0,$out1,$inpperm ++ vsel $out0,$in0,$tmp,$out0 ++ vxor $out0,$out0,$twk0 ++ ++ subi r30,$out,1 ++ mtctr $taillen ++Loop_xts_dec6x_steal: ++ lbzu r0,1(r30) ++ stb 
r0,16(r30) ++ bdnz Loop_xts_dec6x_steal ++ ++ li $taillen,0 ++ mtctr $rounds ++ b Loop_xts_dec1x # one more time... ++ ++.align 4 ++Lxts_dec6x_done: ++ ${UCMP}i $ivp,0 ++ beq Lxts_dec6x_ret ++ ++ vxor $tweak,$twk0,$rndkey0 ++ le?vperm $tweak,$tweak,$tweak,$leperm ++ stvx_u $tweak,0,$ivp ++ ++Lxts_dec6x_ret: ++ mtlr r11 ++ li r10,`$FRAME+15` ++ li r11,`$FRAME+31` ++ stvx $seven,r10,$sp # wipe copies of round keys ++ addi r10,r10,32 ++ stvx $seven,r11,$sp ++ addi r11,r11,32 ++ stvx $seven,r10,$sp ++ addi r10,r10,32 ++ stvx $seven,r11,$sp ++ addi r11,r11,32 ++ stvx $seven,r10,$sp ++ addi r10,r10,32 ++ stvx $seven,r11,$sp ++ addi r11,r11,32 ++ stvx $seven,r10,$sp ++ addi r10,r10,32 ++ stvx $seven,r11,$sp ++ addi r11,r11,32 ++ ++ mtspr 256,$vrsave ++ lvx v20,r10,$sp # ABI says so ++ addi r10,r10,32 ++ lvx v21,r11,$sp ++ addi r11,r11,32 ++ lvx v22,r10,$sp ++ addi r10,r10,32 ++ lvx v23,r11,$sp ++ addi r11,r11,32 ++ lvx v24,r10,$sp ++ addi r10,r10,32 ++ lvx v25,r11,$sp ++ addi r11,r11,32 ++ lvx v26,r10,$sp ++ addi r10,r10,32 ++ lvx v27,r11,$sp ++ addi r11,r11,32 ++ lvx v28,r10,$sp ++ addi r10,r10,32 ++ lvx v29,r11,$sp ++ addi r11,r11,32 ++ lvx v30,r10,$sp ++ lvx v31,r11,$sp ++ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp) ++ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp) ++ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp) ++ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp) ++ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp) ++ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp) ++ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T` ++ blr ++ .long 0 ++ .byte 0,12,0x04,1,0x80,6,6,0 ++ .long 0 ++ ++.align 5 ++_aesp8_xts_dec5x: ++ vncipher $out0,$out0,v24 ++ vncipher $out1,$out1,v24 ++ vncipher $out2,$out2,v24 ++ vncipher $out3,$out3,v24 ++ vncipher $out4,$out4,v24 ++ lvx v24,$x20,$key_ # round[3] ++ addi $key_,$key_,0x20 ++ ++ vncipher $out0,$out0,v25 ++ vncipher $out1,$out1,v25 ++ vncipher $out2,$out2,v25 ++ vncipher $out3,$out3,v25 ++ vncipher $out4,$out4,v25 ++ lvx v25,$x10,$key_ # round[4] ++ bdnz _aesp8_xts_dec5x ++ ++ subi r0,$taillen,1 ++ 
vncipher $out0,$out0,v24 ++ vncipher $out1,$out1,v24 ++ vncipher $out2,$out2,v24 ++ vncipher $out3,$out3,v24 ++ vncipher $out4,$out4,v24 ++ ++ andi. r0,r0,16 ++ cmpwi $taillen,0 ++ vncipher $out0,$out0,v25 ++ vncipher $out1,$out1,v25 ++ vncipher $out2,$out2,v25 ++ vncipher $out3,$out3,v25 ++ vncipher $out4,$out4,v25 ++ vxor $twk0,$twk0,v31 ++ ++ sub $inp,$inp,r0 ++ vncipher $out0,$out0,v26 ++ vncipher $out1,$out1,v26 ++ vncipher $out2,$out2,v26 ++ vncipher $out3,$out3,v26 ++ vncipher $out4,$out4,v26 ++ vxor $in1,$twk1,v31 ++ ++ vncipher $out0,$out0,v27 ++ lvx_u $in0,0,$inp ++ vncipher $out1,$out1,v27 ++ vncipher $out2,$out2,v27 ++ vncipher $out3,$out3,v27 ++ vncipher $out4,$out4,v27 ++ vxor $in2,$twk2,v31 ++ ++ addi $key_,$sp,`$FRAME+15` # rewind $key_ ++ vncipher $out0,$out0,v28 ++ vncipher $out1,$out1,v28 ++ vncipher $out2,$out2,v28 ++ vncipher $out3,$out3,v28 ++ vncipher $out4,$out4,v28 ++ lvx v24,$x00,$key_ # re-pre-load round[1] ++ vxor $in3,$twk3,v31 ++ ++ vncipher $out0,$out0,v29 ++ le?vperm $in0,$in0,$in0,$leperm ++ vncipher $out1,$out1,v29 ++ vncipher $out2,$out2,v29 ++ vncipher $out3,$out3,v29 ++ vncipher $out4,$out4,v29 ++ lvx v25,$x10,$key_ # re-pre-load round[2] ++ vxor $in4,$twk4,v31 ++ ++ vncipher $out0,$out0,v30 ++ vncipher $out1,$out1,v30 ++ vncipher $out2,$out2,v30 ++ vncipher $out3,$out3,v30 ++ vncipher $out4,$out4,v30 ++ ++ vncipherlast $out0,$out0,$twk0 ++ vncipherlast $out1,$out1,$in1 ++ vncipherlast $out2,$out2,$in2 ++ vncipherlast $out3,$out3,$in3 ++ vncipherlast $out4,$out4,$in4 ++ mtctr $rounds ++ blr ++ .long 0 ++ .byte 0,12,0x14,0,0,0,0,0 ++___ ++}} }}} ++ ++my $consts=1; ++foreach(split("\n",$code)) { ++ s/\`([^\`]*)\`/eval($1)/geo; ++ ++ # constants table endian-specific conversion ++ if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) { ++ my $conv=$3; ++ my @bytes=(); ++ ++ # convert to endian-agnostic format ++ if ($1 eq "long") { ++ foreach (split(/,\s*/,$2)) { ++ my $l = /^0/?oct:int; ++ push 
@bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff; ++ } ++ } else { ++ @bytes = map(/^0/?oct:int,split(/,\s*/,$2)); ++ } ++ ++ # little-endian conversion ++ if ($flavour =~ /le$/o) { ++ SWITCH: for($conv) { ++ /\?inv/ && do { @bytes=map($_^0xf,@bytes); last; }; ++ /\?rev/ && do { @bytes=reverse(@bytes); last; }; ++ } ++ } ++ ++ #emit ++ print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n"; ++ next; ++ } ++ $consts=0 if (m/Lconsts:/o); # end of table ++ ++ # instructions prefixed with '?' are endian-specific and need ++ # to be adjusted accordingly... ++ if ($flavour =~ /le$/o) { # little-endian ++ s/le\?//o or ++ s/be\?/#be#/o or ++ s/\?lvsr/lvsl/o or ++ s/\?lvsl/lvsr/o or ++ s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or ++ s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or ++ s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o; ++ } else { # big-endian ++ s/le\?/#le#/o or ++ s/be\?//o or ++ s/\?([a-z]+)/$1/o; ++ } ++ ++ print $_,"\n"; ++} ++ ++close STDOUT or die "error closing STDOUT: $!"; +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/aes/internal.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/fipsmodule/aes/internal.h ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/aes/internal.h +@@ -59,6 +59,12 @@ OPENSSL_INLINE int vpaes_capable(void) { + OPENSSL_INLINE int vpaes_capable(void) { return CRYPTO_is_NEON_capable(); } + #endif + ++#elif defined(OPENSSL_PPC64LE) ++#define HWAES ++ ++OPENSSL_INLINE int hwaes_capable(void) { ++ return CRYPTO_is_PPC64LE_vcrypto_capable(); ++} + #endif + + #endif // !NO_ASM +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/bcm.c +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/fipsmodule/bcm.c ++++ 
chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/bcm.c +@@ -102,6 +102,7 @@ + #include "self_check/fips.c" + #include "self_check/self_check.c" + #include "service_indicator/service_indicator.c" ++#include "sha/sha1-altivec.c" + #include "sha/sha1.c" + #include "sha/sha256.c" + #include "sha/sha512.c" +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/bn/bn.c +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/fipsmodule/bn/bn.c ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/bn/bn.c +@@ -384,6 +384,23 @@ int bn_expand(BIGNUM *bn, size_t bits) { + } + + int bn_resize_words(BIGNUM *bn, size_t words) { ++#if defined(OPENSSL_PPC64LE) ++ // This is a workaround for a miscompilation bug in Clang 7.0.1 on POWER. ++ // The unittests catch the miscompilation, if it occurs, and it manifests ++ // as a crash in |bn_fits_in_words|. ++ // ++ // The bug only triggers if building in FIPS mode and with -O3. Clang 8.0.1 ++ // has the same bug but this workaround is not effective there---I've not ++ // been able to find a workaround for 8.0.1. ++ // ++ // At the time of writing (2019-08-08), Clang git does *not* have this bug ++ // and does not need this workaroud. The current git version should go on to ++ // be Clang 10 thus, once we can depend on that, this can be removed. 
++ if (value_barrier_w((size_t)bn->width == words)) { ++ return 1; ++ } ++#endif ++ + if ((size_t)bn->width <= words) { + if (!bn_wexpand(bn, words)) { + return 0; +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/cipher/e_aes.c +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/fipsmodule/cipher/e_aes.c ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/cipher/e_aes.c +@@ -1455,6 +1455,8 @@ int EVP_has_aes_hardware(void) { + return hwaes_capable() && crypto_gcm_clmul_enabled(); + #elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) + return hwaes_capable() && CRYPTO_is_ARMv8_PMULL_capable(); ++#elif defined(OPENSSL_PPC64LE) ++ return CRYPTO_is_PPC64LE_vcrypto_capable(); + #else + return 0; + #endif +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/modes/asm/ghashp8-ppc.pl +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/modes/asm/ghashp8-ppc.pl +@@ -0,0 +1,671 @@ ++#! /usr/bin/env perl ++# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. ++# ++# Licensed under the OpenSSL license (the "License"). You may not use ++# this file except in compliance with the License. You can obtain a copy ++# in the file LICENSE in the source distribution or at ++# https://www.openssl.org/source/license.html ++ ++# ++# ==================================================================== ++# Written by Andy Polyakov for the OpenSSL ++# project. The module is, however, dual licensed under OpenSSL and ++# CRYPTOGAMS licenses depending on where you obtain it. For further ++# details see http://www.openssl.org/~appro/cryptogams/. ++# ==================================================================== ++# ++# GHASH for for PowerISA v2.07. 
++# ++# July 2014 ++# ++# Accurate performance measurements are problematic, because it's ++# always virtualized setup with possibly throttled processor. ++# Relative comparison is therefore more informative. This initial ++# version is ~2.1x slower than hardware-assisted AES-128-CTR, ~12x ++# faster than "4-bit" integer-only compiler-generated 64-bit code. ++# "Initial version" means that there is room for futher improvement. ++ ++# May 2016 ++# ++# 2x aggregated reduction improves performance by 50% (resulting ++# performance on POWER8 is 1 cycle per processed byte), and 4x ++# aggregated reduction - by 170% or 2.7x (resulting in 0.55 cpb). ++ ++$flavour=shift; ++$output =shift; ++ ++if ($flavour =~ /64/) { ++ $SIZE_T=8; ++ $LRSAVE=2*$SIZE_T; ++ $STU="stdu"; ++ $POP="ld"; ++ $PUSH="std"; ++ $UCMP="cmpld"; ++ $SHRI="srdi"; ++} elsif ($flavour =~ /32/) { ++ $SIZE_T=4; ++ $LRSAVE=$SIZE_T; ++ $STU="stwu"; ++ $POP="lwz"; ++ $PUSH="stw"; ++ $UCMP="cmplw"; ++ $SHRI="srwi"; ++} else { die "nonsense $flavour"; } ++ ++$sp="r1"; ++$FRAME=6*$SIZE_T+13*16; # 13*16 is for v20-v31 offload ++ ++$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; ++( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or ++( $xlate="${dir}../../../perlasm/ppc-xlate.pl" and -f $xlate) or ++die "can't locate ppc-xlate.pl"; ++ ++open OUT,"| $^X \"$xlate\" $flavour \"$output\"" || die "can't call $xlate: $!"; ++*STDOUT=*OUT; ++ ++my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6)); # argument block ++ ++my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3)); ++my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12)); ++my ($Xl1,$Xm1,$Xh1,$IN1,$H2,$H2h,$H2l)=map("v$_",(13..19)); ++my $vrsave="r12"; ++ ++$code=<<___; ++.machine "any" ++ ++.text ++ ++.globl .gcm_init_p8 ++.align 5 ++.gcm_init_p8: ++ li r0,-4096 ++ li r8,0x10 ++ mfspr $vrsave,256 ++ li r9,0x20 ++ mtspr 256,r0 ++ li r10,0x30 ++ lvx_u $H,0,r4 # load H ++ ++ vspltisb $xC2,-16 # 0xf0 ++ vspltisb $t0,1 # one ++ vaddubm $xC2,$xC2,$xC2 # 0xe0 ++ vxor $zero,$zero,$zero ++ vor 
$xC2,$xC2,$t0 # 0xe1 ++ vsldoi $xC2,$xC2,$zero,15 # 0xe1... ++ vsldoi $t1,$zero,$t0,1 # ...1 ++ vaddubm $xC2,$xC2,$xC2 # 0xc2... ++ vspltisb $t2,7 ++ vor $xC2,$xC2,$t1 # 0xc2....01 ++ vspltb $t1,$H,0 # most significant byte ++ vsl $H,$H,$t0 # H<<=1 ++ vsrab $t1,$t1,$t2 # broadcast carry bit ++ vand $t1,$t1,$xC2 ++ vxor $IN,$H,$t1 # twisted H ++ ++ vsldoi $H,$IN,$IN,8 # twist even more ... ++ vsldoi $xC2,$zero,$xC2,8 # 0xc2.0 ++ vsldoi $Hl,$zero,$H,8 # ... and split ++ vsldoi $Hh,$H,$zero,8 ++ ++ stvx_u $xC2,0,r3 # save pre-computed table ++ stvx_u $Hl,r8,r3 ++ li r8,0x40 ++ stvx_u $H, r9,r3 ++ li r9,0x50 ++ stvx_u $Hh,r10,r3 ++ li r10,0x60 ++ ++ vpmsumd $Xl,$IN,$Hl # H.lo·H.lo ++ vpmsumd $Xm,$IN,$H # H.hi·H.lo+H.lo·H.hi ++ vpmsumd $Xh,$IN,$Hh # H.hi·H.hi ++ ++ vpmsumd $t2,$Xl,$xC2 # 1st reduction phase ++ ++ vsldoi $t0,$Xm,$zero,8 ++ vsldoi $t1,$zero,$Xm,8 ++ vxor $Xl,$Xl,$t0 ++ vxor $Xh,$Xh,$t1 ++ ++ vsldoi $Xl,$Xl,$Xl,8 ++ vxor $Xl,$Xl,$t2 ++ ++ vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase ++ vpmsumd $Xl,$Xl,$xC2 ++ vxor $t1,$t1,$Xh ++ vxor $IN1,$Xl,$t1 ++ ++ vsldoi $H2,$IN1,$IN1,8 ++ vsldoi $H2l,$zero,$H2,8 ++ vsldoi $H2h,$H2,$zero,8 ++ ++ stvx_u $H2l,r8,r3 # save H^2 ++ li r8,0x70 ++ stvx_u $H2,r9,r3 ++ li r9,0x80 ++ stvx_u $H2h,r10,r3 ++ li r10,0x90 ++___ ++{ ++my ($t4,$t5,$t6) = ($Hl,$H,$Hh); ++$code.=<<___; ++ vpmsumd $Xl,$IN,$H2l # H.lo·H^2.lo ++ vpmsumd $Xl1,$IN1,$H2l # H^2.lo·H^2.lo ++ vpmsumd $Xm,$IN,$H2 # H.hi·H^2.lo+H.lo·H^2.hi ++ vpmsumd $Xm1,$IN1,$H2 # H^2.hi·H^2.lo+H^2.lo·H^2.hi ++ vpmsumd $Xh,$IN,$H2h # H.hi·H^2.hi ++ vpmsumd $Xh1,$IN1,$H2h # H^2.hi·H^2.hi ++ ++ vpmsumd $t2,$Xl,$xC2 # 1st reduction phase ++ vpmsumd $t6,$Xl1,$xC2 # 1st reduction phase ++ ++ vsldoi $t0,$Xm,$zero,8 ++ vsldoi $t1,$zero,$Xm,8 ++ vsldoi $t4,$Xm1,$zero,8 ++ vsldoi $t5,$zero,$Xm1,8 ++ vxor $Xl,$Xl,$t0 ++ vxor $Xh,$Xh,$t1 ++ vxor $Xl1,$Xl1,$t4 ++ vxor $Xh1,$Xh1,$t5 ++ ++ vsldoi $Xl,$Xl,$Xl,8 ++ vsldoi $Xl1,$Xl1,$Xl1,8 ++ vxor $Xl,$Xl,$t2 ++ vxor $Xl1,$Xl1,$t6 ++ ++ vsldoi 
$t1,$Xl,$Xl,8 # 2nd reduction phase ++ vsldoi $t5,$Xl1,$Xl1,8 # 2nd reduction phase ++ vpmsumd $Xl,$Xl,$xC2 ++ vpmsumd $Xl1,$Xl1,$xC2 ++ vxor $t1,$t1,$Xh ++ vxor $t5,$t5,$Xh1 ++ vxor $Xl,$Xl,$t1 ++ vxor $Xl1,$Xl1,$t5 ++ ++ vsldoi $H,$Xl,$Xl,8 ++ vsldoi $H2,$Xl1,$Xl1,8 ++ vsldoi $Hl,$zero,$H,8 ++ vsldoi $Hh,$H,$zero,8 ++ vsldoi $H2l,$zero,$H2,8 ++ vsldoi $H2h,$H2,$zero,8 ++ ++ stvx_u $Hl,r8,r3 # save H^3 ++ li r8,0xa0 ++ stvx_u $H,r9,r3 ++ li r9,0xb0 ++ stvx_u $Hh,r10,r3 ++ li r10,0xc0 ++ stvx_u $H2l,r8,r3 # save H^4 ++ stvx_u $H2,r9,r3 ++ stvx_u $H2h,r10,r3 ++ ++ mtspr 256,$vrsave ++ blr ++ .long 0 ++ .byte 0,12,0x14,0,0,0,2,0 ++ .long 0 ++.size .gcm_init_p8,.-.gcm_init_p8 ++___ ++} ++$code.=<<___; ++.globl .gcm_gmult_p8 ++.align 5 ++.gcm_gmult_p8: ++ lis r0,0xfff8 ++ li r8,0x10 ++ mfspr $vrsave,256 ++ li r9,0x20 ++ mtspr 256,r0 ++ li r10,0x30 ++ lvx_u $IN,0,$Xip # load Xi ++ ++ lvx_u $Hl,r8,$Htbl # load pre-computed table ++ le?lvsl $lemask,r0,r0 ++ lvx_u $H, r9,$Htbl ++ le?vspltisb $t0,0x07 ++ lvx_u $Hh,r10,$Htbl ++ le?vxor $lemask,$lemask,$t0 ++ lvx_u $xC2,0,$Htbl ++ le?vperm $IN,$IN,$IN,$lemask ++ vxor $zero,$zero,$zero ++ ++ vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo ++ vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi ++ vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi ++ ++ vpmsumd $t2,$Xl,$xC2 # 1st reduction phase ++ ++ vsldoi $t0,$Xm,$zero,8 ++ vsldoi $t1,$zero,$Xm,8 ++ vxor $Xl,$Xl,$t0 ++ vxor $Xh,$Xh,$t1 ++ ++ vsldoi $Xl,$Xl,$Xl,8 ++ vxor $Xl,$Xl,$t2 ++ ++ vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase ++ vpmsumd $Xl,$Xl,$xC2 ++ vxor $t1,$t1,$Xh ++ vxor $Xl,$Xl,$t1 ++ ++ le?vperm $Xl,$Xl,$Xl,$lemask ++ stvx_u $Xl,0,$Xip # write out Xi ++ ++ mtspr 256,$vrsave ++ blr ++ .long 0 ++ .byte 0,12,0x14,0,0,0,2,0 ++ .long 0 ++.size .gcm_gmult_p8,.-.gcm_gmult_p8 ++ ++.globl .gcm_ghash_p8 ++.align 5 ++.gcm_ghash_p8: ++ li r0,-4096 ++ li r8,0x10 ++ mfspr $vrsave,256 ++ li r9,0x20 ++ mtspr 256,r0 ++ li r10,0x30 ++ lvx_u $Xl,0,$Xip # load Xi ++ ++ lvx_u $Hl,r8,$Htbl # load pre-computed table ++ li 
r8,0x40 ++ le?lvsl $lemask,r0,r0 ++ lvx_u $H, r9,$Htbl ++ li r9,0x50 ++ le?vspltisb $t0,0x07 ++ lvx_u $Hh,r10,$Htbl ++ li r10,0x60 ++ le?vxor $lemask,$lemask,$t0 ++ lvx_u $xC2,0,$Htbl ++ le?vperm $Xl,$Xl,$Xl,$lemask ++ vxor $zero,$zero,$zero ++ ++ ${UCMP}i $len,64 ++ bge Lgcm_ghash_p8_4x ++ ++ lvx_u $IN,0,$inp ++ addi $inp,$inp,16 ++ subic. $len,$len,16 ++ le?vperm $IN,$IN,$IN,$lemask ++ vxor $IN,$IN,$Xl ++ beq Lshort ++ ++ lvx_u $H2l,r8,$Htbl # load H^2 ++ li r8,16 ++ lvx_u $H2, r9,$Htbl ++ add r9,$inp,$len # end of input ++ lvx_u $H2h,r10,$Htbl ++ be?b Loop_2x ++ ++.align 5 ++Loop_2x: ++ lvx_u $IN1,0,$inp ++ le?vperm $IN1,$IN1,$IN1,$lemask ++ ++ subic $len,$len,32 ++ vpmsumd $Xl,$IN,$H2l # H^2.lo·Xi.lo ++ vpmsumd $Xl1,$IN1,$Hl # H.lo·Xi+1.lo ++ subfe r0,r0,r0 # borrow?-1:0 ++ vpmsumd $Xm,$IN,$H2 # H^2.hi·Xi.lo+H^2.lo·Xi.hi ++ vpmsumd $Xm1,$IN1,$H # H.hi·Xi+1.lo+H.lo·Xi+1.hi ++ and r0,r0,$len ++ vpmsumd $Xh,$IN,$H2h # H^2.hi·Xi.hi ++ vpmsumd $Xh1,$IN1,$Hh # H.hi·Xi+1.hi ++ add $inp,$inp,r0 ++ ++ vxor $Xl,$Xl,$Xl1 ++ vxor $Xm,$Xm,$Xm1 ++ ++ vpmsumd $t2,$Xl,$xC2 # 1st reduction phase ++ ++ vsldoi $t0,$Xm,$zero,8 ++ vsldoi $t1,$zero,$Xm,8 ++ vxor $Xh,$Xh,$Xh1 ++ vxor $Xl,$Xl,$t0 ++ vxor $Xh,$Xh,$t1 ++ ++ vsldoi $Xl,$Xl,$Xl,8 ++ vxor $Xl,$Xl,$t2 ++ lvx_u $IN,r8,$inp ++ addi $inp,$inp,32 ++ ++ vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase ++ vpmsumd $Xl,$Xl,$xC2 ++ le?vperm $IN,$IN,$IN,$lemask ++ vxor $t1,$t1,$Xh ++ vxor $IN,$IN,$t1 ++ vxor $IN,$IN,$Xl ++ $UCMP r9,$inp ++ bgt Loop_2x # done yet? 
++ ++ cmplwi $len,0 ++ bne Leven ++ ++Lshort: ++ vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo ++ vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi ++ vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi ++ ++ vpmsumd $t2,$Xl,$xC2 # 1st reduction phase ++ ++ vsldoi $t0,$Xm,$zero,8 ++ vsldoi $t1,$zero,$Xm,8 ++ vxor $Xl,$Xl,$t0 ++ vxor $Xh,$Xh,$t1 ++ ++ vsldoi $Xl,$Xl,$Xl,8 ++ vxor $Xl,$Xl,$t2 ++ ++ vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase ++ vpmsumd $Xl,$Xl,$xC2 ++ vxor $t1,$t1,$Xh ++ ++Leven: ++ vxor $Xl,$Xl,$t1 ++ le?vperm $Xl,$Xl,$Xl,$lemask ++ stvx_u $Xl,0,$Xip # write out Xi ++ ++ mtspr 256,$vrsave ++ blr ++ .long 0 ++ .byte 0,12,0x14,0,0,0,4,0 ++ .long 0 ++___ ++{ ++my ($Xl3,$Xm2,$IN2,$H3l,$H3,$H3h, ++ $Xh3,$Xm3,$IN3,$H4l,$H4,$H4h) = map("v$_",(20..31)); ++my $IN0=$IN; ++my ($H21l,$H21h,$loperm,$hiperm) = ($Hl,$Hh,$H2l,$H2h); ++ ++$code.=<<___; ++.align 5 ++.gcm_ghash_p8_4x: ++Lgcm_ghash_p8_4x: ++ $STU $sp,-$FRAME($sp) ++ li r10,`15+6*$SIZE_T` ++ li r11,`31+6*$SIZE_T` ++ stvx v20,r10,$sp ++ addi r10,r10,32 ++ stvx v21,r11,$sp ++ addi r11,r11,32 ++ stvx v22,r10,$sp ++ addi r10,r10,32 ++ stvx v23,r11,$sp ++ addi r11,r11,32 ++ stvx v24,r10,$sp ++ addi r10,r10,32 ++ stvx v25,r11,$sp ++ addi r11,r11,32 ++ stvx v26,r10,$sp ++ addi r10,r10,32 ++ stvx v27,r11,$sp ++ addi r11,r11,32 ++ stvx v28,r10,$sp ++ addi r10,r10,32 ++ stvx v29,r11,$sp ++ addi r11,r11,32 ++ stvx v30,r10,$sp ++ li r10,0x60 ++ stvx v31,r11,$sp ++ li r0,-1 ++ stw $vrsave,`$FRAME-4`($sp) # save vrsave ++ mtspr 256,r0 # preserve all AltiVec registers ++ ++ lvsl $t0,0,r8 # 0x0001..0e0f ++ #lvx_u $H2l,r8,$Htbl # load H^2 ++ li r8,0x70 ++ lvx_u $H2, r9,$Htbl ++ li r9,0x80 ++ vspltisb $t1,8 # 0x0808..0808 ++ #lvx_u $H2h,r10,$Htbl ++ li r10,0x90 ++ lvx_u $H3l,r8,$Htbl # load H^3 ++ li r8,0xa0 ++ lvx_u $H3, r9,$Htbl ++ li r9,0xb0 ++ lvx_u $H3h,r10,$Htbl ++ li r10,0xc0 ++ lvx_u $H4l,r8,$Htbl # load H^4 ++ li r8,0x10 ++ lvx_u $H4, r9,$Htbl ++ li r9,0x20 ++ lvx_u $H4h,r10,$Htbl ++ li r10,0x30 ++ ++ vsldoi $t2,$zero,$t1,8 # 0x0000..0808 ++ vaddubm 
$hiperm,$t0,$t2 # 0x0001..1617 ++ vaddubm $loperm,$t1,$hiperm # 0x0809..1e1f ++ ++ $SHRI $len,$len,4 # this allows to use sign bit ++ # as carry ++ lvx_u $IN0,0,$inp # load input ++ lvx_u $IN1,r8,$inp ++ subic. $len,$len,8 ++ lvx_u $IN2,r9,$inp ++ lvx_u $IN3,r10,$inp ++ addi $inp,$inp,0x40 ++ le?vperm $IN0,$IN0,$IN0,$lemask ++ le?vperm $IN1,$IN1,$IN1,$lemask ++ le?vperm $IN2,$IN2,$IN2,$lemask ++ le?vperm $IN3,$IN3,$IN3,$lemask ++ ++ vxor $Xh,$IN0,$Xl ++ ++ vpmsumd $Xl1,$IN1,$H3l ++ vpmsumd $Xm1,$IN1,$H3 ++ vpmsumd $Xh1,$IN1,$H3h ++ ++ vperm $H21l,$H2,$H,$hiperm ++ vperm $t0,$IN2,$IN3,$loperm ++ vperm $H21h,$H2,$H,$loperm ++ vperm $t1,$IN2,$IN3,$hiperm ++ vpmsumd $Xm2,$IN2,$H2 # H^2.lo·Xi+2.hi+H^2.hi·Xi+2.lo ++ vpmsumd $Xl3,$t0,$H21l # H^2.lo·Xi+2.lo+H.lo·Xi+3.lo ++ vpmsumd $Xm3,$IN3,$H # H.hi·Xi+3.lo +H.lo·Xi+3.hi ++ vpmsumd $Xh3,$t1,$H21h # H^2.hi·Xi+2.hi+H.hi·Xi+3.hi ++ ++ vxor $Xm2,$Xm2,$Xm1 ++ vxor $Xl3,$Xl3,$Xl1 ++ vxor $Xm3,$Xm3,$Xm2 ++ vxor $Xh3,$Xh3,$Xh1 ++ ++ blt Ltail_4x ++ ++Loop_4x: ++ lvx_u $IN0,0,$inp ++ lvx_u $IN1,r8,$inp ++ subic. 
$len,$len,4 ++ lvx_u $IN2,r9,$inp ++ lvx_u $IN3,r10,$inp ++ addi $inp,$inp,0x40 ++ le?vperm $IN1,$IN1,$IN1,$lemask ++ le?vperm $IN2,$IN2,$IN2,$lemask ++ le?vperm $IN3,$IN3,$IN3,$lemask ++ le?vperm $IN0,$IN0,$IN0,$lemask ++ ++ vpmsumd $Xl,$Xh,$H4l # H^4.lo·Xi.lo ++ vpmsumd $Xm,$Xh,$H4 # H^4.hi·Xi.lo+H^4.lo·Xi.hi ++ vpmsumd $Xh,$Xh,$H4h # H^4.hi·Xi.hi ++ vpmsumd $Xl1,$IN1,$H3l ++ vpmsumd $Xm1,$IN1,$H3 ++ vpmsumd $Xh1,$IN1,$H3h ++ ++ vxor $Xl,$Xl,$Xl3 ++ vxor $Xm,$Xm,$Xm3 ++ vxor $Xh,$Xh,$Xh3 ++ vperm $t0,$IN2,$IN3,$loperm ++ vperm $t1,$IN2,$IN3,$hiperm ++ ++ vpmsumd $t2,$Xl,$xC2 # 1st reduction phase ++ vpmsumd $Xl3,$t0,$H21l # H.lo·Xi+3.lo +H^2.lo·Xi+2.lo ++ vpmsumd $Xh3,$t1,$H21h # H.hi·Xi+3.hi +H^2.hi·Xi+2.hi ++ ++ vsldoi $t0,$Xm,$zero,8 ++ vsldoi $t1,$zero,$Xm,8 ++ vxor $Xl,$Xl,$t0 ++ vxor $Xh,$Xh,$t1 ++ ++ vsldoi $Xl,$Xl,$Xl,8 ++ vxor $Xl,$Xl,$t2 ++ ++ vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase ++ vpmsumd $Xm2,$IN2,$H2 # H^2.hi·Xi+2.lo+H^2.lo·Xi+2.hi ++ vpmsumd $Xm3,$IN3,$H # H.hi·Xi+3.lo +H.lo·Xi+3.hi ++ vpmsumd $Xl,$Xl,$xC2 ++ ++ vxor $Xl3,$Xl3,$Xl1 ++ vxor $Xh3,$Xh3,$Xh1 ++ vxor $Xh,$Xh,$IN0 ++ vxor $Xm2,$Xm2,$Xm1 ++ vxor $Xh,$Xh,$t1 ++ vxor $Xm3,$Xm3,$Xm2 ++ vxor $Xh,$Xh,$Xl ++ bge Loop_4x ++ ++Ltail_4x: ++ vpmsumd $Xl,$Xh,$H4l # H^4.lo·Xi.lo ++ vpmsumd $Xm,$Xh,$H4 # H^4.hi·Xi.lo+H^4.lo·Xi.hi ++ vpmsumd $Xh,$Xh,$H4h # H^4.hi·Xi.hi ++ ++ vxor $Xl,$Xl,$Xl3 ++ vxor $Xm,$Xm,$Xm3 ++ ++ vpmsumd $t2,$Xl,$xC2 # 1st reduction phase ++ ++ vsldoi $t0,$Xm,$zero,8 ++ vsldoi $t1,$zero,$Xm,8 ++ vxor $Xh,$Xh,$Xh3 ++ vxor $Xl,$Xl,$t0 ++ vxor $Xh,$Xh,$t1 ++ ++ vsldoi $Xl,$Xl,$Xl,8 ++ vxor $Xl,$Xl,$t2 ++ ++ vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase ++ vpmsumd $Xl,$Xl,$xC2 ++ vxor $t1,$t1,$Xh ++ vxor $Xl,$Xl,$t1 ++ ++ addic. 
$len,$len,4 ++ beq Ldone_4x ++ ++ lvx_u $IN0,0,$inp ++ ${UCMP}i $len,2 ++ li $len,-4 ++ blt Lone ++ lvx_u $IN1,r8,$inp ++ beq Ltwo ++ ++Lthree: ++ lvx_u $IN2,r9,$inp ++ le?vperm $IN0,$IN0,$IN0,$lemask ++ le?vperm $IN1,$IN1,$IN1,$lemask ++ le?vperm $IN2,$IN2,$IN2,$lemask ++ ++ vxor $Xh,$IN0,$Xl ++ vmr $H4l,$H3l ++ vmr $H4, $H3 ++ vmr $H4h,$H3h ++ ++ vperm $t0,$IN1,$IN2,$loperm ++ vperm $t1,$IN1,$IN2,$hiperm ++ vpmsumd $Xm2,$IN1,$H2 # H^2.lo·Xi+1.hi+H^2.hi·Xi+1.lo ++ vpmsumd $Xm3,$IN2,$H # H.hi·Xi+2.lo +H.lo·Xi+2.hi ++ vpmsumd $Xl3,$t0,$H21l # H^2.lo·Xi+1.lo+H.lo·Xi+2.lo ++ vpmsumd $Xh3,$t1,$H21h # H^2.hi·Xi+1.hi+H.hi·Xi+2.hi ++ ++ vxor $Xm3,$Xm3,$Xm2 ++ b Ltail_4x ++ ++.align 4 ++Ltwo: ++ le?vperm $IN0,$IN0,$IN0,$lemask ++ le?vperm $IN1,$IN1,$IN1,$lemask ++ ++ vxor $Xh,$IN0,$Xl ++ vperm $t0,$zero,$IN1,$loperm ++ vperm $t1,$zero,$IN1,$hiperm ++ ++ vsldoi $H4l,$zero,$H2,8 ++ vmr $H4, $H2 ++ vsldoi $H4h,$H2,$zero,8 ++ ++ vpmsumd $Xl3,$t0, $H21l # H.lo·Xi+1.lo ++ vpmsumd $Xm3,$IN1,$H # H.hi·Xi+1.lo+H.lo·Xi+2.hi ++ vpmsumd $Xh3,$t1, $H21h # H.hi·Xi+1.hi ++ ++ b Ltail_4x ++ ++.align 4 ++Lone: ++ le?vperm $IN0,$IN0,$IN0,$lemask ++ ++ vsldoi $H4l,$zero,$H,8 ++ vmr $H4, $H ++ vsldoi $H4h,$H,$zero,8 ++ ++ vxor $Xh,$IN0,$Xl ++ vxor $Xl3,$Xl3,$Xl3 ++ vxor $Xm3,$Xm3,$Xm3 ++ vxor $Xh3,$Xh3,$Xh3 ++ ++ b Ltail_4x ++ ++Ldone_4x: ++ le?vperm $Xl,$Xl,$Xl,$lemask ++ stvx_u $Xl,0,$Xip # write out Xi ++ ++ li r10,`15+6*$SIZE_T` ++ li r11,`31+6*$SIZE_T` ++ mtspr 256,$vrsave ++ lvx v20,r10,$sp ++ addi r10,r10,32 ++ lvx v21,r11,$sp ++ addi r11,r11,32 ++ lvx v22,r10,$sp ++ addi r10,r10,32 ++ lvx v23,r11,$sp ++ addi r11,r11,32 ++ lvx v24,r10,$sp ++ addi r10,r10,32 ++ lvx v25,r11,$sp ++ addi r11,r11,32 ++ lvx v26,r10,$sp ++ addi r10,r10,32 ++ lvx v27,r11,$sp ++ addi r11,r11,32 ++ lvx v28,r10,$sp ++ addi r10,r10,32 ++ lvx v29,r11,$sp ++ addi r11,r11,32 ++ lvx v30,r10,$sp ++ lvx v31,r11,$sp ++ addi $sp,$sp,$FRAME ++ blr ++ .long 0 ++ .byte 0,12,0x04,0,0x80,0,4,0 ++ .long 0 ++___ ++} 
++$code.=<<___; ++.size .gcm_ghash_p8,.-.gcm_ghash_p8 ++ ++.asciz "GHASH for PowerISA 2.07, CRYPTOGAMS by " ++.align 2 ++___ ++ ++foreach (split("\n",$code)) { ++ s/\`([^\`]*)\`/eval $1/geo; ++ ++ if ($flavour =~ /le$/o) { # little-endian ++ s/le\?//o or ++ s/be\?/#be#/o; ++ } else { ++ s/le\?/#le#/o or ++ s/be\?//o; ++ } ++ print $_,"\n"; ++} ++ ++close STDOUT or die "error closing STDOUT: $!"; # enforce flush +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/modes/gcm.c +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/fipsmodule/modes/gcm.c ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/modes/gcm.c +@@ -228,6 +228,13 @@ void CRYPTO_ghash_init(gmult_func *out_m + *out_hash = gcm_ghash_neon; + return; + } ++#elif defined(GHASH_ASM_PPC64LE) ++ if (CRYPTO_is_PPC64LE_vcrypto_capable()) { ++ gcm_init_p8(out_table, H); ++ *out_mult = gcm_gmult_p8; ++ *out_hash = gcm_ghash_p8; ++ return; ++ } + #endif + + gcm_init_nohw(out_table, H); +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/modes/gcm_test.cc +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/fipsmodule/modes/gcm_test.cc ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/modes/gcm_test.cc +@@ -215,5 +215,15 @@ TEST(GCMTest, ABI) { + } + } + #endif ++ ++#if defined(GHASH_ASM_PPC64LE) ++ if (CRYPTO_is_PPC64LE_vcrypto_capable()) { ++ CHECK_ABI(gcm_init_p8, Htable, kH); ++ CHECK_ABI(gcm_gmult_p8, X, Htable); ++ for (size_t blocks : kBlockCounts) { ++ CHECK_ABI(gcm_ghash_p8, X, Htable, buf, 16 * blocks); ++ } ++ } ++#endif // GHASH_ASM_PPC64LE + } + #endif // SUPPORTS_ABI_TEST && !OPENSSL_NO_ASM +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/modes/internal.h +=================================================================== 
+--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/fipsmodule/modes/internal.h ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/modes/internal.h +@@ -325,6 +325,13 @@ void aes_gcm_dec_kernel(const uint8_t *i + const u128 Htable[16]); + #endif + ++#elif defined(OPENSSL_PPC64LE) ++#define GHASH_ASM_PPC64LE ++#define GCM_FUNCREF ++void gcm_init_p8(u128 Htable[16], const uint64_t Xi[2]); ++void gcm_gmult_p8(uint8_t Xi[16], const u128 Htable[16]); ++void gcm_ghash_p8(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp, ++ size_t len); + #endif + #endif // OPENSSL_NO_ASM + +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/rand/getrandom_fillin.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/fipsmodule/rand/getrandom_fillin.h ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/rand/getrandom_fillin.h +@@ -30,6 +30,8 @@ + #define EXPECTED_NR_getrandom 278 + #elif defined(OPENSSL_ARM) + #define EXPECTED_NR_getrandom 384 ++#elif defined(OPENSSL_PPC64LE) ++#define EXPECTED_NR_getrandom 359 + #elif defined(OPENSSL_RISCV64) + #define EXPECTED_NR_getrandom 278 + #endif +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/rand/rand.c +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/fipsmodule/rand/rand.c ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/rand/rand.c +@@ -431,6 +431,11 @@ void RAND_bytes_with_additional_data(uin + // Take a read lock around accesses to |state->drbg|. This is needed to + // avoid returning bad entropy if we race with + // |rand_thread_state_clear_all|. ++ // ++ // This lock must be taken after any calls to |CRYPTO_sysrand| to avoid a ++ // bug on ppc64le. 
glibc may implement pthread locks by wrapping user code ++ // in a hardware transaction, but, on some older versions of glibc and the ++ // kernel, syscalls made with |syscall| did not abort the transaction. + CRYPTO_MUTEX_lock_read(&state->clear_drbg_lock); + #endif + if (!CTR_DRBG_reseed(&state->drbg, seed, reseed_additional_data, +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/sha/internal.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/fipsmodule/sha/internal.h ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/sha/internal.h +@@ -23,17 +23,25 @@ + extern "C" { + #endif + ++#if defined(OPENSSL_PPC64LE) || \ ++ (!defined(OPENSSL_NO_ASM) && (defined(OPENSSL_X86) || defined(OPENSSL_ARM))) ++// POWER has an intrinsics-based implementation of SHA-1 and thus the functions ++// normally defined in assembly are available even with |OPENSSL_NO_ASM| in ++// this case. ++#define SHA1_ASM ++void sha1_block_data_order(uint32_t *state, const uint8_t *in, ++ size_t num_blocks); ++#endif ++ ++ + // Define SHA{n}[_{variant}]_ASM if sha{n}_block_data_order[_{variant}] is + // defined in assembly. 
+ + #if !defined(OPENSSL_NO_ASM) && (defined(OPENSSL_X86) || defined(OPENSSL_ARM)) + +-#define SHA1_ASM + #define SHA256_ASM + #define SHA512_ASM + +-void sha1_block_data_order(uint32_t *state, const uint8_t *data, +- size_t num_blocks); + void sha256_block_data_order(uint32_t *state, const uint8_t *data, + size_t num_blocks); + void sha512_block_data_order(uint64_t *state, const uint8_t *data, +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/sha/sha1-altivec.c +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/fipsmodule/sha/sha1-altivec.c +@@ -0,0 +1,361 @@ ++/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) ++ * All rights reserved. ++ * ++ * This package is an SSL implementation written ++ * by Eric Young (eay@cryptsoft.com). ++ * The implementation was written so as to conform with Netscapes SSL. ++ * ++ * This library is free for commercial and non-commercial use as long as ++ * the following conditions are aheared to. The following conditions ++ * apply to all code found in this distribution, be it the RC4, RSA, ++ * lhash, DES, etc., code; not just the SSL code. The SSL documentation ++ * included with this distribution is covered by the same copyright terms ++ * except that the holder is Tim Hudson (tjh@cryptsoft.com). ++ * ++ * Copyright remains Eric Young's, and as such any Copyright notices in ++ * the code are not to be removed. ++ * If this package is used in a product, Eric Young should be given attribution ++ * as the author of the parts of the library used. ++ * This can be in the form of a textual message at program startup or ++ * in documentation (online or textual) provided with the package. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * 1. 
Redistributions of source code must retain the copyright ++ * notice, this list of conditions and the following disclaimer. ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * 3. All advertising materials mentioning features or use of this software ++ * must display the following acknowledgement: ++ * "This product includes cryptographic software written by ++ * Eric Young (eay@cryptsoft.com)" ++ * The word 'cryptographic' can be left out if the rouines from the library ++ * being used are not cryptographic related :-). ++ * 4. If you include any Windows specific code (or a derivative thereof) from ++ * the apps directory (application code) you must include an acknowledgement: ++ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" ++ * ++ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND ++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE ++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS ++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF ++ * SUCH DAMAGE. ++ * ++ * The licence and distribution terms for any publically available version or ++ * derivative of this code cannot be changed. i.e. this code cannot simply be ++ * copied and put under another distribution licence ++ * [including the GNU Public Licence.] 
*/
++
++// Altivec-optimized SHA1 in C. This is tested on ppc64le only.
++//
++// References:
++// https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1
++// http://arctic.org/~dean/crypto/sha1.html
++//
++// This code used the generic SHA-1 from OpenSSL as a basis and AltiVec
++// optimisations were added on top.
++
++#include <openssl/sha.h>
++
++#if defined(OPENSSL_PPC64LE)
++
++#include <altivec.h>
++
++void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num);
++
++static uint32_t rotate(uint32_t a, int n) { return (a << n) | (a >> (32 - n)); }
++
++typedef vector unsigned int vec_uint32_t;
++typedef vector unsigned char vec_uint8_t;
++
++// Vector constants
++static const vec_uint8_t k_swap_endianness = {3, 2, 1, 0, 7, 6, 5, 4,
++                                              11, 10, 9, 8, 15, 14, 13, 12};
++
++// Shift amounts for byte and bit shifts and rotations
++static const vec_uint8_t k_4_bytes = {32, 32, 32, 32, 32, 32, 32, 32,
++                                      32, 32, 32, 32, 32, 32, 32, 32};
++static const vec_uint8_t k_12_bytes = {96, 96, 96, 96, 96, 96, 96, 96,
++                                       96, 96, 96, 96, 96, 96, 96, 96};
++
++#define K_00_19 0x5a827999UL
++#define K_20_39 0x6ed9eba1UL
++#define K_40_59 0x8f1bbcdcUL
++#define K_60_79 0xca62c1d6UL
++
++// Vector versions of the above.
++static const vec_uint32_t K_00_19_x_4 = {K_00_19, K_00_19, K_00_19, K_00_19};
++static const vec_uint32_t K_20_39_x_4 = {K_20_39, K_20_39, K_20_39, K_20_39};
++static const vec_uint32_t K_40_59_x_4 = {K_40_59, K_40_59, K_40_59, K_40_59};
++static const vec_uint32_t K_60_79_x_4 = {K_60_79, K_60_79, K_60_79, K_60_79};
++
++// vector message scheduling: compute message schedule for round i..i+3 where i
++// is divisible by 4. We return the schedule w[i..i+3] as a vector. In
++// addition, we also precompute sum w[i..i+3] and an additive constant K. This
++// is done to offload some computation of f() in the integer execution units.
++//
++// Byte shifting code below may not be correct for big-endian systems.
++static vec_uint32_t sched_00_15(vec_uint32_t *pre_added, const void *data, ++ vec_uint32_t k) { ++ const vector unsigned char unaligned_data = ++ vec_vsx_ld(0, (const unsigned char*) data); ++ const vec_uint32_t v = (vec_uint32_t) unaligned_data; ++ const vec_uint32_t w = vec_perm(v, v, k_swap_endianness); ++ vec_st(w + k, 0, pre_added); ++ return w; ++} ++ ++// Compute w[i..i+3] using these steps for i in [16, 20, 24, 28] ++// ++// w'[i ] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) <<< 1 ++// w'[i+1] = (w[i-2] ^ w[i-7] ^ w[i-13] ^ w[i-15]) <<< 1 ++// w'[i+2] = (w[i-1] ^ w[i-6] ^ w[i-12] ^ w[i-14]) <<< 1 ++// w'[i+3] = ( 0 ^ w[i-5] ^ w[i-11] ^ w[i-13]) <<< 1 ++// ++// w[ i] = w'[ i] ++// w[i+1] = w'[i+1] ++// w[i+2] = w'[i+2] ++// w[i+3] = w'[i+3] ^ (w'[i] <<< 1) ++static vec_uint32_t sched_16_31(vec_uint32_t *pre_added, vec_uint32_t minus_4, ++ vec_uint32_t minus_8, vec_uint32_t minus_12, ++ vec_uint32_t minus_16, vec_uint32_t k) { ++ const vec_uint32_t minus_3 = vec_sro(minus_4, k_4_bytes); ++ const vec_uint32_t minus_14 = vec_sld((minus_12), (minus_16), 8); ++ const vec_uint32_t k_1_bit = vec_splat_u32(1); ++ const vec_uint32_t w_prime = ++ vec_rl(minus_3 ^ minus_8 ^ minus_14 ^ minus_16, k_1_bit); ++ const vec_uint32_t w = ++ w_prime ^ vec_rl(vec_slo(w_prime, k_12_bytes), k_1_bit); ++ vec_st(w + k, 0, pre_added); ++ return w; ++} ++ ++// Compute w[i..i+3] using this relation for i in [32, 36, 40 ... 
76] ++// w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]), 2) <<< 2 ++static vec_uint32_t sched_32_79(vec_uint32_t *pre_added, vec_uint32_t minus_4, ++ vec_uint32_t minus_8, vec_uint32_t minus_16, ++ vec_uint32_t minus_28, vec_uint32_t minus_32, ++ vec_uint32_t k) { ++ const vec_uint32_t minus_6 = vec_sld(minus_4, minus_8, 8); ++ const vec_uint32_t k_2_bits = vec_splat_u32(2); ++ const vec_uint32_t w = ++ vec_rl(minus_6 ^ minus_16 ^ minus_28 ^ minus_32, k_2_bits); ++ vec_st(w + k, 0, pre_added); ++ return w; ++} ++ ++// As pointed out by Wei Dai , F() below can be simplified ++// to the code in F_00_19. Wei attributes these optimisations to Peter ++// Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define ++// F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another ++// tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a ++#define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d)) ++#define F_20_39(b, c, d) ((b) ^ (c) ^ (d)) ++#define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d))) ++#define F_60_79(b, c, d) F_20_39(b, c, d) ++ ++// We pre-added the K constants during message scheduling. 
++#define BODY_00_19(i, a, b, c, d, e, f) \ ++ do { \ ++ (f) = w[i] + (e) + rotate((a), 5) + F_00_19((b), (c), (d)); \ ++ (b) = rotate((b), 30); \ ++ } while (0) ++ ++#define BODY_20_39(i, a, b, c, d, e, f) \ ++ do { \ ++ (f) = w[i] + (e) + rotate((a), 5) + F_20_39((b), (c), (d)); \ ++ (b) = rotate((b), 30); \ ++ } while (0) ++ ++#define BODY_40_59(i, a, b, c, d, e, f) \ ++ do { \ ++ (f) = w[i] + (e) + rotate((a), 5) + F_40_59((b), (c), (d)); \ ++ (b) = rotate((b), 30); \ ++ } while (0) ++ ++#define BODY_60_79(i, a, b, c, d, e, f) \ ++ do { \ ++ (f) = w[i] + (e) + rotate((a), 5) + F_60_79((b), (c), (d)); \ ++ (b) = rotate((b), 30); \ ++ } while (0) ++ ++void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { ++ uint32_t A, B, C, D, E, T; ++ ++ A = state[0]; ++ B = state[1]; ++ C = state[2]; ++ D = state[3]; ++ E = state[4]; ++ ++ for (;;) { ++ vec_uint32_t vw[20]; ++ const uint32_t *w = (const uint32_t *)&vw; ++ ++ vec_uint32_t k = K_00_19_x_4; ++ const vec_uint32_t w0 = sched_00_15(vw + 0, data + 0, k); ++ BODY_00_19(0, A, B, C, D, E, T); ++ BODY_00_19(1, T, A, B, C, D, E); ++ BODY_00_19(2, E, T, A, B, C, D); ++ BODY_00_19(3, D, E, T, A, B, C); ++ ++ const vec_uint32_t w4 = sched_00_15(vw + 1, data + 16, k); ++ BODY_00_19(4, C, D, E, T, A, B); ++ BODY_00_19(5, B, C, D, E, T, A); ++ BODY_00_19(6, A, B, C, D, E, T); ++ BODY_00_19(7, T, A, B, C, D, E); ++ ++ const vec_uint32_t w8 = sched_00_15(vw + 2, data + 32, k); ++ BODY_00_19(8, E, T, A, B, C, D); ++ BODY_00_19(9, D, E, T, A, B, C); ++ BODY_00_19(10, C, D, E, T, A, B); ++ BODY_00_19(11, B, C, D, E, T, A); ++ ++ const vec_uint32_t w12 = sched_00_15(vw + 3, data + 48, k); ++ BODY_00_19(12, A, B, C, D, E, T); ++ BODY_00_19(13, T, A, B, C, D, E); ++ BODY_00_19(14, E, T, A, B, C, D); ++ BODY_00_19(15, D, E, T, A, B, C); ++ ++ const vec_uint32_t w16 = sched_16_31(vw + 4, w12, w8, w4, w0, k); ++ BODY_00_19(16, C, D, E, T, A, B); ++ BODY_00_19(17, B, C, D, E, T, A); ++ BODY_00_19(18, A, B, C, D, E, 
T); ++ BODY_00_19(19, T, A, B, C, D, E); ++ ++ k = K_20_39_x_4; ++ const vec_uint32_t w20 = sched_16_31(vw + 5, w16, w12, w8, w4, k); ++ BODY_20_39(20, E, T, A, B, C, D); ++ BODY_20_39(21, D, E, T, A, B, C); ++ BODY_20_39(22, C, D, E, T, A, B); ++ BODY_20_39(23, B, C, D, E, T, A); ++ ++ const vec_uint32_t w24 = sched_16_31(vw + 6, w20, w16, w12, w8, k); ++ BODY_20_39(24, A, B, C, D, E, T); ++ BODY_20_39(25, T, A, B, C, D, E); ++ BODY_20_39(26, E, T, A, B, C, D); ++ BODY_20_39(27, D, E, T, A, B, C); ++ ++ const vec_uint32_t w28 = sched_16_31(vw + 7, w24, w20, w16, w12, k); ++ BODY_20_39(28, C, D, E, T, A, B); ++ BODY_20_39(29, B, C, D, E, T, A); ++ BODY_20_39(30, A, B, C, D, E, T); ++ BODY_20_39(31, T, A, B, C, D, E); ++ ++ const vec_uint32_t w32 = sched_32_79(vw + 8, w28, w24, w16, w4, w0, k); ++ BODY_20_39(32, E, T, A, B, C, D); ++ BODY_20_39(33, D, E, T, A, B, C); ++ BODY_20_39(34, C, D, E, T, A, B); ++ BODY_20_39(35, B, C, D, E, T, A); ++ ++ const vec_uint32_t w36 = sched_32_79(vw + 9, w32, w28, w20, w8, w4, k); ++ BODY_20_39(36, A, B, C, D, E, T); ++ BODY_20_39(37, T, A, B, C, D, E); ++ BODY_20_39(38, E, T, A, B, C, D); ++ BODY_20_39(39, D, E, T, A, B, C); ++ ++ k = K_40_59_x_4; ++ const vec_uint32_t w40 = sched_32_79(vw + 10, w36, w32, w24, w12, w8, k); ++ BODY_40_59(40, C, D, E, T, A, B); ++ BODY_40_59(41, B, C, D, E, T, A); ++ BODY_40_59(42, A, B, C, D, E, T); ++ BODY_40_59(43, T, A, B, C, D, E); ++ ++ const vec_uint32_t w44 = sched_32_79(vw + 11, w40, w36, w28, w16, w12, k); ++ BODY_40_59(44, E, T, A, B, C, D); ++ BODY_40_59(45, D, E, T, A, B, C); ++ BODY_40_59(46, C, D, E, T, A, B); ++ BODY_40_59(47, B, C, D, E, T, A); ++ ++ const vec_uint32_t w48 = sched_32_79(vw + 12, w44, w40, w32, w20, w16, k); ++ BODY_40_59(48, A, B, C, D, E, T); ++ BODY_40_59(49, T, A, B, C, D, E); ++ BODY_40_59(50, E, T, A, B, C, D); ++ BODY_40_59(51, D, E, T, A, B, C); ++ ++ const vec_uint32_t w52 = sched_32_79(vw + 13, w48, w44, w36, w24, w20, k); ++ BODY_40_59(52, C, D, E, T, A, 
B); ++ BODY_40_59(53, B, C, D, E, T, A); ++ BODY_40_59(54, A, B, C, D, E, T); ++ BODY_40_59(55, T, A, B, C, D, E); ++ ++ const vec_uint32_t w56 = sched_32_79(vw + 14, w52, w48, w40, w28, w24, k); ++ BODY_40_59(56, E, T, A, B, C, D); ++ BODY_40_59(57, D, E, T, A, B, C); ++ BODY_40_59(58, C, D, E, T, A, B); ++ BODY_40_59(59, B, C, D, E, T, A); ++ ++ k = K_60_79_x_4; ++ const vec_uint32_t w60 = sched_32_79(vw + 15, w56, w52, w44, w32, w28, k); ++ BODY_60_79(60, A, B, C, D, E, T); ++ BODY_60_79(61, T, A, B, C, D, E); ++ BODY_60_79(62, E, T, A, B, C, D); ++ BODY_60_79(63, D, E, T, A, B, C); ++ ++ const vec_uint32_t w64 = sched_32_79(vw + 16, w60, w56, w48, w36, w32, k); ++ BODY_60_79(64, C, D, E, T, A, B); ++ BODY_60_79(65, B, C, D, E, T, A); ++ BODY_60_79(66, A, B, C, D, E, T); ++ BODY_60_79(67, T, A, B, C, D, E); ++ ++ const vec_uint32_t w68 = sched_32_79(vw + 17, w64, w60, w52, w40, w36, k); ++ BODY_60_79(68, E, T, A, B, C, D); ++ BODY_60_79(69, D, E, T, A, B, C); ++ BODY_60_79(70, C, D, E, T, A, B); ++ BODY_60_79(71, B, C, D, E, T, A); ++ ++ const vec_uint32_t w72 = sched_32_79(vw + 18, w68, w64, w56, w44, w40, k); ++ BODY_60_79(72, A, B, C, D, E, T); ++ BODY_60_79(73, T, A, B, C, D, E); ++ BODY_60_79(74, E, T, A, B, C, D); ++ BODY_60_79(75, D, E, T, A, B, C); ++ ++ // We don't use the last value ++ (void)sched_32_79(vw + 19, w72, w68, w60, w48, w44, k); ++ BODY_60_79(76, C, D, E, T, A, B); ++ BODY_60_79(77, B, C, D, E, T, A); ++ BODY_60_79(78, A, B, C, D, E, T); ++ BODY_60_79(79, T, A, B, C, D, E); ++ ++ const uint32_t mask = 0xffffffffUL; ++ state[0] = (state[0] + E) & mask; ++ state[1] = (state[1] + T) & mask; ++ state[2] = (state[2] + A) & mask; ++ state[3] = (state[3] + B) & mask; ++ state[4] = (state[4] + C) & mask; ++ ++ data += 64; ++ if (--num == 0) { ++ break; ++ } ++ ++ A = state[0]; ++ B = state[1]; ++ C = state[2]; ++ D = state[3]; ++ E = state[4]; ++ } ++} ++ ++#endif // OPENSSL_PPC64LE ++ ++#undef K_00_19 ++#undef K_20_39 ++#undef K_40_59 ++#undef 
K_60_79 ++#undef F_00_19 ++#undef F_20_39 ++#undef F_40_59 ++#undef F_60_79 ++#undef BODY_00_19 ++#undef BODY_20_39 ++#undef BODY_40_59 ++#undef BODY_60_79 +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/internal.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/internal.h ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/internal.h +@@ -181,7 +181,7 @@ extern "C" { + + + #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || defined(OPENSSL_ARM) || \ +- defined(OPENSSL_AARCH64) ++ defined(OPENSSL_AARCH64) || defined(OPENSSL_PPC64LE) + // OPENSSL_cpuid_setup initializes the platform-specific feature cache. + void OPENSSL_cpuid_setup(void); + #endif +@@ -1606,6 +1606,16 @@ OPENSSL_INLINE int CRYPTO_is_ARMv8_SHA51 + + #endif // OPENSSL_ARM || OPENSSL_AARCH64 + ++#if defined(OPENSSL_PPC64LE) ++ ++// CRYPTO_is_PPC64LE_vcrypto_capable returns true iff the current CPU supports ++// the Vector.AES category of instructions. ++int CRYPTO_is_PPC64LE_vcrypto_capable(void); ++ ++extern unsigned long OPENSSL_ppc64le_hwcap2; ++ ++#endif // OPENSSL_PPC64LE ++ + #if defined(BORINGSSL_DISPATCH_TEST) + // Runtime CPU dispatch testing support + +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/perlasm/ppc-xlate.pl +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/perlasm/ppc-xlate.pl +@@ -0,0 +1,320 @@ ++#! /usr/bin/env perl ++# Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved. ++# ++# Licensed under the OpenSSL license (the "License"). You may not use ++# this file except in compliance with the License. 
You can obtain a copy ++# in the file LICENSE in the source distribution or at ++# https://www.openssl.org/source/license.html ++ ++my $flavour = shift; ++my $output = shift; ++open STDOUT,">$output" || die "can't open $output: $!"; ++ ++my %GLOBALS; ++my %TYPES; ++my $dotinlocallabels=($flavour=~/linux/)?1:0; ++ ++################################################################ ++# directives which need special treatment on different platforms ++################################################################ ++my $type = sub { ++ my ($dir,$name,$type) = @_; ++ ++ $TYPES{$name} = $type; ++ if ($flavour =~ /linux/) { ++ $name =~ s|^\.||; ++ ".type $name,$type"; ++ } else { ++ ""; ++ } ++}; ++my $globl = sub { ++ my $junk = shift; ++ my $name = shift; ++ my $global = \$GLOBALS{$name}; ++ my $type = \$TYPES{$name}; ++ my $ret; ++ ++ $name =~ s|^\.||; ++ ++ SWITCH: for ($flavour) { ++ /aix/ && do { if (!$$type) { ++ $$type = "\@function"; ++ } ++ if ($$type =~ /function/) { ++ $name = ".$name"; ++ } ++ last; ++ }; ++ /osx/ && do { $name = "_$name"; ++ last; ++ }; ++ /linux.*(32|64le)/ ++ && do { $ret .= ".globl $name"; ++ if (!$$type) { ++ $ret .= "\n.type $name,\@function"; ++ $$type = "\@function"; ++ } ++ last; ++ }; ++ /linux.*64/ && do { $ret .= ".globl $name"; ++ if (!$$type) { ++ $ret .= "\n.type $name,\@function"; ++ $$type = "\@function"; ++ } ++ if ($$type =~ /function/) { ++ $ret .= "\n.section \".opd\",\"aw\""; ++ $ret .= "\n.align 3"; ++ $ret .= "\n$name:"; ++ $ret .= "\n.quad .$name,.TOC.\@tocbase,0"; ++ $ret .= "\n.previous"; ++ $name = ".$name"; ++ } ++ last; ++ }; ++ } ++ ++ $ret = ".globl $name" if (!$ret); ++ $$global = $name; ++ $ret; ++}; ++my $text = sub { ++ my $ret = ($flavour =~ /aix/) ? ".csect\t.text[PR],7" : ".text"; ++ $ret = ".abiversion 2\n".$ret if ($flavour =~ /linux.*64le/); ++ $ret; ++}; ++my $machine = sub { ++ my $junk = shift; ++ my $arch = shift; ++ if ($flavour =~ /osx/) ++ { $arch =~ s/\"//g; ++ $arch = ($flavour=~/64/) ? 
"ppc970-64" : "ppc970" if ($arch eq "any"); ++ } ++ ".machine $arch"; ++}; ++my $size = sub { ++ if ($flavour =~ /linux/) ++ { shift; ++ my $name = shift; ++ my $real = $GLOBALS{$name} ? \$GLOBALS{$name} : \$name; ++ my $ret = ".size $$real,.-$$real"; ++ $name =~ s|^\.||; ++ if ($$real ne $name) { ++ $ret .= "\n.size $name,.-$$real"; ++ } ++ $ret; ++ } ++ else ++ { ""; } ++}; ++my $asciz = sub { ++ shift; ++ my $line = join(",",@_); ++ if ($line =~ /^"(.*)"$/) ++ { ".byte " . join(",",unpack("C*",$1),0) . "\n.align 2"; } ++ else ++ { ""; } ++}; ++my $quad = sub { ++ shift; ++ my @ret; ++ my ($hi,$lo); ++ for (@_) { ++ if (/^0x([0-9a-f]*?)([0-9a-f]{1,8})$/io) ++ { $hi=$1?"0x$1":"0"; $lo="0x$2"; } ++ elsif (/^([0-9]+)$/o) ++ { $hi=$1>>32; $lo=$1&0xffffffff; } # error-prone with 32-bit perl ++ else ++ { $hi=undef; $lo=$_; } ++ ++ if (defined($hi)) ++ { push(@ret,$flavour=~/le$/o?".long\t$lo,$hi":".long\t$hi,$lo"); } ++ else ++ { push(@ret,".quad $lo"); } ++ } ++ join("\n",@ret); ++}; ++ ++################################################################ ++# simplified mnemonics not handled by at least one assembler ++################################################################ ++my $cmplw = sub { ++ my $f = shift; ++ my $cr = 0; $cr = shift if ($#_>1); ++ # Some out-of-date 32-bit GNU assembler just can't handle cmplw... ++ ($flavour =~ /linux.*32/) ? ++ " .long ".sprintf "0x%x",31<<26|$cr<<23|$_[0]<<16|$_[1]<<11|64 : ++ " cmplw ".join(',',$cr,@_); ++}; ++my $bdnz = sub { ++ my $f = shift; ++ my $bo = $f=~/[\+\-]/ ? 16+9 : 16; # optional "to be taken" hint ++ " bc $bo,0,".shift; ++} if ($flavour!~/linux/); ++my $bltlr = sub { ++ my $f = shift; ++ my $bo = $f=~/\-/ ? 12+2 : 12; # optional "not to be taken" hint ++ ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints ++ " .long ".sprintf "0x%x",19<<26|$bo<<21|16<<1 : ++ " bclr $bo,0"; ++}; ++my $bnelr = sub { ++ my $f = shift; ++ my $bo = $f=~/\-/ ? 
4+2 : 4; # optional "not to be taken" hint ++ ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints ++ " .long ".sprintf "0x%x",19<<26|$bo<<21|2<<16|16<<1 : ++ " bclr $bo,2"; ++}; ++my $beqlr = sub { ++ my $f = shift; ++ my $bo = $f=~/-/ ? 12+2 : 12; # optional "not to be taken" hint ++ ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints ++ " .long ".sprintf "0x%X",19<<26|$bo<<21|2<<16|16<<1 : ++ " bclr $bo,2"; ++}; ++# GNU assembler can't handle extrdi rA,rS,16,48, or when sum of last two ++# arguments is 64, with "operand out of range" error. ++my $extrdi = sub { ++ my ($f,$ra,$rs,$n,$b) = @_; ++ $b = ($b+$n)&63; $n = 64-$n; ++ " rldicl $ra,$rs,$b,$n"; ++}; ++my $vmr = sub { ++ my ($f,$vx,$vy) = @_; ++ " vor $vx,$vy,$vy"; ++}; ++ ++# Some ABIs specify vrsave, special-purpose register #256, as reserved ++# for system use. ++my $no_vrsave = ($flavour =~ /aix|linux64le/); ++my $mtspr = sub { ++ my ($f,$idx,$ra) = @_; ++ if ($idx == 256 && $no_vrsave) { ++ " or $ra,$ra,$ra"; ++ } else { ++ " mtspr $idx,$ra"; ++ } ++}; ++my $mfspr = sub { ++ my ($f,$rd,$idx) = @_; ++ if ($idx == 256 && $no_vrsave) { ++ " li $rd,-1"; ++ } else { ++ " mfspr $rd,$idx"; ++ } ++}; ++ ++# PowerISA 2.06 stuff ++sub vsxmem_op { ++ my ($f, $vrt, $ra, $rb, $op) = @_; ++ " .long ".sprintf "0x%X",(31<<26)|($vrt<<21)|($ra<<16)|($rb<<11)|($op*2+1); ++} ++# made-up unaligned memory reference AltiVec/VMX instructions ++my $lvx_u = sub { vsxmem_op(@_, 844); }; # lxvd2x ++my $stvx_u = sub { vsxmem_op(@_, 972); }; # stxvd2x ++my $lvdx_u = sub { vsxmem_op(@_, 588); }; # lxsdx ++my $stvdx_u = sub { vsxmem_op(@_, 716); }; # stxsdx ++my $lvx_4w = sub { vsxmem_op(@_, 780); }; # lxvw4x ++my $stvx_4w = sub { vsxmem_op(@_, 908); }; # stxvw4x ++ ++# PowerISA 2.07 stuff ++sub vcrypto_op { ++ my ($f, $vrt, $vra, $vrb, $op) = @_; ++ " .long ".sprintf "0x%X",(4<<26)|($vrt<<21)|($vra<<16)|($vrb<<11)|$op; ++} ++my $vcipher = sub { vcrypto_op(@_, 1288); }; ++my $vcipherlast = sub { 
vcrypto_op(@_, 1289); }; ++my $vncipher = sub { vcrypto_op(@_, 1352); }; ++my $vncipherlast= sub { vcrypto_op(@_, 1353); }; ++my $vsbox = sub { vcrypto_op(@_, 0, 1480); }; ++my $vshasigmad = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1730); }; ++my $vshasigmaw = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1666); }; ++my $vpmsumb = sub { vcrypto_op(@_, 1032); }; ++my $vpmsumd = sub { vcrypto_op(@_, 1224); }; ++my $vpmsubh = sub { vcrypto_op(@_, 1096); }; ++my $vpmsumw = sub { vcrypto_op(@_, 1160); }; ++my $vaddudm = sub { vcrypto_op(@_, 192); }; ++ ++my $mtsle = sub { ++ my ($f, $arg) = @_; ++ " .long ".sprintf "0x%X",(31<<26)|($arg<<21)|(147*2); ++}; ++ ++# PowerISA 3.0 stuff ++my $maddhdu = sub { ++ my ($f, $rt, $ra, $rb, $rc) = @_; ++ " .long ".sprintf "0x%X",(4<<26)|($rt<<21)|($ra<<16)|($rb<<11)|($rc<<6)|49; ++}; ++my $maddld = sub { ++ my ($f, $rt, $ra, $rb, $rc) = @_; ++ " .long ".sprintf "0x%X",(4<<26)|($rt<<21)|($ra<<16)|($rb<<11)|($rc<<6)|51; ++}; ++ ++my $darn = sub { ++ my ($f, $rt, $l) = @_; ++ " .long ".sprintf "0x%X",(31<<26)|($rt<<21)|($l<<16)|(755<<1); ++}; ++ ++print <<___; ++// This file is generated from a similarly-named Perl script in the BoringSSL ++// source tree. Do not edit by hand. ++ ++#if defined(__has_feature) ++#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) ++#define OPENSSL_NO_ASM ++#endif ++#endif ++ ++#if !defined(OPENSSL_NO_ASM) && defined(__powerpc64__) && defined(__ELF__) ++___ ++ ++while($line=<>) { ++ ++ $line =~ s|[#!;].*$||; # get rid of asm-style comments... ++ $line =~ s|/\*.*\*/||; # ... and C-style comments... ++ $line =~ s|^\s+||; # ... and skip white spaces in beginning... ++ $line =~ s|\s+$||; # ... 
and at the end ++ ++ { ++ $line =~ s|\.L(\w+)|L$1|g; # common denominator for Locallabel ++ $line =~ s|\bL(\w+)|\.L$1|g if ($dotinlocallabels); ++ } ++ ++ { ++ $line =~ s|(^[\.\w]+)\:\s*||; ++ my $label = $1; ++ if ($label) { ++ my $xlated = ($GLOBALS{$label} or $label); ++ print "$xlated:"; ++ if ($flavour =~ /linux.*64le/) { ++ if ($TYPES{$label} =~ /function/) { ++ printf "\n.localentry %s,0\n",$xlated; ++ } ++ } ++ } ++ } ++ ++ { ++ $line =~ s|^\s*(\.?)(\w+)([\.\+\-]?)\s*||; ++ my $c = $1; $c = "\t" if ($c eq ""); ++ my $mnemonic = $2; ++ my $f = $3; ++ my $opcode = eval("\$$mnemonic"); ++ $line =~ s/\b(c?[rf]|v|vs)([0-9]+)\b/$2/g if ($c ne "." and $flavour !~ /osx/); ++ if (ref($opcode) eq 'CODE') { $line = &$opcode($f,split(',',$line)); } ++ elsif ($mnemonic) { $line = $c.$mnemonic.$f."\t".$line; } ++ } ++ ++ print $line if ($line); ++ print "\n"; ++} ++ ++print <<___; ++#endif // !OPENSSL_NO_ASM && __powerpc64__ && __ELF__ ++#if defined(__ELF__) ++// See https://www.airs.com/blog/archives/518. ++.section .note.GNU-stack,"",\%progbits ++#endif ++___ ++ ++close STDOUT or die "error closing STDOUT: $!"; +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/test/abi_test.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/crypto/test/abi_test.h ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/test/abi_test.h +@@ -179,7 +179,78 @@ struct alignas(16) Reg128 { + CALLER_STATE_REGISTER(uint64_t, x28) \ + CALLER_STATE_REGISTER(uint64_t, x29) + +-#endif // X86_64 || X86 || ARM || AARCH64 ++#elif defined(OPENSSL_PPC64LE) ++ ++// CRReg only compares the CR2-CR4 bits of a CR register. 
++struct CRReg { ++ uint32_t masked() const { return value & 0x00fff000; } ++ bool operator==(CRReg r) const { return masked() == r.masked(); } ++ bool operator!=(CRReg r) const { return masked() != r.masked(); } ++ uint32_t value; ++}; ++ ++// References: ++// ELFv2: http://openpowerfoundation.org/wp-content/uploads/resources/leabi/leabi-20170510.pdf ++// ++// Note vector and floating-point registers on POWER have two different names. ++// Originally, there were 32 floating-point registers and 32 vector registers, ++// labelled f0-f31 and v0-v31 respectively. Later, VSX (Vector Scalar Extension) ++// unified them into 64 registers vs0-vs63. f0-f31 map to the lower halves of ++// vs0-vs31. v0-v31 map to vs32-vs63. The ABI was defined in terms of pre-VSX ++// names, so we use those names here. In particular, f14-f31 are ++// callee-saved, but the upper halves of vs14-vs31 are not. ++#define LOOP_CALLER_STATE_REGISTERS() \ ++ CALLER_STATE_REGISTER(Reg128, v20) \ ++ CALLER_STATE_REGISTER(Reg128, v21) \ ++ CALLER_STATE_REGISTER(Reg128, v22) \ ++ CALLER_STATE_REGISTER(Reg128, v23) \ ++ CALLER_STATE_REGISTER(Reg128, v24) \ ++ CALLER_STATE_REGISTER(Reg128, v25) \ ++ CALLER_STATE_REGISTER(Reg128, v26) \ ++ CALLER_STATE_REGISTER(Reg128, v27) \ ++ CALLER_STATE_REGISTER(Reg128, v28) \ ++ CALLER_STATE_REGISTER(Reg128, v29) \ ++ CALLER_STATE_REGISTER(Reg128, v30) \ ++ CALLER_STATE_REGISTER(Reg128, v31) \ ++ CALLER_STATE_REGISTER(uint64_t, r14) \ ++ CALLER_STATE_REGISTER(uint64_t, r15) \ ++ CALLER_STATE_REGISTER(uint64_t, r16) \ ++ CALLER_STATE_REGISTER(uint64_t, r17) \ ++ CALLER_STATE_REGISTER(uint64_t, r18) \ ++ CALLER_STATE_REGISTER(uint64_t, r19) \ ++ CALLER_STATE_REGISTER(uint64_t, r20) \ ++ CALLER_STATE_REGISTER(uint64_t, r21) \ ++ CALLER_STATE_REGISTER(uint64_t, r22) \ ++ CALLER_STATE_REGISTER(uint64_t, r23) \ ++ CALLER_STATE_REGISTER(uint64_t, r24) \ ++ CALLER_STATE_REGISTER(uint64_t, r25) \ ++ CALLER_STATE_REGISTER(uint64_t, r26) \ ++ CALLER_STATE_REGISTER(uint64_t, 
r27) \ ++ CALLER_STATE_REGISTER(uint64_t, r28) \ ++ CALLER_STATE_REGISTER(uint64_t, r29) \ ++ CALLER_STATE_REGISTER(uint64_t, r30) \ ++ CALLER_STATE_REGISTER(uint64_t, r31) \ ++ CALLER_STATE_REGISTER(uint64_t, f14) \ ++ CALLER_STATE_REGISTER(uint64_t, f15) \ ++ CALLER_STATE_REGISTER(uint64_t, f16) \ ++ CALLER_STATE_REGISTER(uint64_t, f17) \ ++ CALLER_STATE_REGISTER(uint64_t, f18) \ ++ CALLER_STATE_REGISTER(uint64_t, f19) \ ++ CALLER_STATE_REGISTER(uint64_t, f20) \ ++ CALLER_STATE_REGISTER(uint64_t, f21) \ ++ CALLER_STATE_REGISTER(uint64_t, f22) \ ++ CALLER_STATE_REGISTER(uint64_t, f23) \ ++ CALLER_STATE_REGISTER(uint64_t, f24) \ ++ CALLER_STATE_REGISTER(uint64_t, f25) \ ++ CALLER_STATE_REGISTER(uint64_t, f26) \ ++ CALLER_STATE_REGISTER(uint64_t, f27) \ ++ CALLER_STATE_REGISTER(uint64_t, f28) \ ++ CALLER_STATE_REGISTER(uint64_t, f29) \ ++ CALLER_STATE_REGISTER(uint64_t, f30) \ ++ CALLER_STATE_REGISTER(uint64_t, f31) \ ++ CALLER_STATE_REGISTER(CRReg, cr) ++ ++#endif // X86_64 || X86 || ARM || AARCH64 || PPC64LE + + // Enable ABI testing if all of the following are true. + // +@@ -231,6 +302,12 @@ inline crypto_word_t ToWord(T t) { + // on 32-bit architectures for simplicity. + static_assert(sizeof(T) == 4, "parameter types must be word-sized"); + return (crypto_word_t)t; ++#elif defined(OPENSSL_PPC64LE) ++ // ELFv2, section 2.2.2.3 says the parameter save area sign- or zero-extends ++ // parameters passed in memory. Section 2.2.3 is unclear on how to handle ++ // register parameters, but section 2.2.2.3 additionally says that the memory ++ // copy of a parameter is identical to the register one. ++ return (crypto_word_t)t; + #elif defined(OPENSSL_X86_64) || defined(OPENSSL_AARCH64) + // AAPCS64, section 5.4.2, clauses C.7 and C.14 says any remaining bits in + // aarch are unspecified. 
iOS64 contradicts this and says the callee extends +@@ -285,9 +362,9 @@ inline crypto_word_t ToWord(T t) { + template + inline crypto_word_t CheckImpl(Result *out, bool unwind, R (*func)(Args...), + typename DeductionGuard::Type... args) { +- // We only support up to 8 arguments, so all arguments on aarch64 are passed +- // in registers. This is simpler and avoids the iOS discrepancy around packing +- // small arguments on the stack. (See the iOS64 reference.) ++ // We only support up to 8 arguments, so all arguments on aarch64 and ppc64le ++ // are passed in registers. This is simpler and avoids the iOS discrepancy ++ // around packing small arguments on the stack. (See the iOS64 reference.) + static_assert(sizeof...(args) <= 8, + "too many arguments for abi_test_trampoline"); + +Index: chromium-122.0.6261.57/third_party/boringssl/src/crypto/test/asm/trampoline-ppc.pl +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/crypto/test/asm/trampoline-ppc.pl +@@ -0,0 +1,262 @@ ++#!/usr/bin/env perl ++# Copyright (c) 2019, Google Inc. ++# ++# Permission to use, copy, modify, and/or distribute this software for any ++# purpose with or without fee is hereby granted, provided that the above ++# copyright notice and this permission notice appear in all copies. ++# ++# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ ++# This file defines helper functions for crypto/test/abi_test.h on ppc64le. 
See ++# that header for details on how to use this. ++# ++# For convenience, this file is linked into libcrypto, where consuming builds ++# already support architecture-specific sources. The static linker should drop ++# this code in non-test binaries. This includes a shared library build of ++# libcrypto, provided --gc-sections or equivalent is used. ++# ++# References: ++# ++# ELFv2: http://openpowerfoundation.org/wp-content/uploads/resources/leabi/leabi-20170510.pdf ++ ++use strict; ++ ++my $flavour = shift; ++my $output = shift; ++if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } ++ ++$0 =~ m/(.*[\/\\])[^\/\\]+$/; ++my $dir = $1; ++my $xlate; ++( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or ++( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or ++die "can't locate ppc-xlate.pl"; ++ ++open OUT, "| \"$^X\" \"$xlate\" $flavour \"$output\""; ++*STDOUT = *OUT; ++ ++unless ($flavour =~ /linux.*64le/) { ++ die "This file only supports the ELFv2 ABI, used by ppc64le"; ++} ++ ++my $code = ""; ++ ++sub load_or_store_regs { ++ # $op is "l" or "st". ++ my ($op, $base_reg, $base_offset) = @_; ++ # Vector registers. ++ foreach (20..31) { ++ my $offset = $base_offset + ($_ - 20) * 16; ++ # Vector registers only support indexed register addressing. ++ $code .= "\tli\tr11, $offset\n"; ++ $code .= "\t${op}vx\tv$_, r11, $base_reg\n"; ++ } ++ # Save general registers. ++ foreach (14..31) { ++ my $offset = $base_offset + 192 + ($_ - 14) * 8; ++ $code .= "\t${op}d\tr$_, $offset($base_reg)\n"; ++ } ++ # Save floating point registers. 
++ foreach (14..31) { ++ my $offset = $base_offset + 336 + ($_ - 14) * 8; ++ $code .= "\t${op}fd\tf$_, $offset($base_reg)\n"; ++ } ++} ++ ++sub load_regs { ++ my ($base_reg, $base_offset) = @_; ++ load_or_store_regs("l", $base_reg, $base_offset); ++} ++ ++sub store_regs { ++ my ($base_reg, $base_offset) = @_; ++ load_or_store_regs("st", $base_reg, $base_offset); ++} ++ ++my ($func, $state, $argv, $argc) = ("r3", "r4", "r5", "r6"); ++$code .= <<____; ++.machine "any" ++.text ++ ++# abi_test_trampoline loads callee-saved registers from |state|, calls |func| ++# with |argv|, then saves the callee-saved registers into |state|. It returns ++# the result of |func|. The |unwind| argument is unused. ++# uint64_t abi_test_trampoline(void (*func)(...), CallerState *state, ++# const uint64_t *argv, size_t argc, ++# uint64_t unwind); ++.globl abi_test_trampoline ++.align 5 ++abi_test_trampoline: ++ # LR is saved into the caller's stack frame. ++ mflr r0 ++ std r0, 16(r1) ++ ++ # Allocate 66*8 = 528 bytes of stack frame. From the top of the stack ++ # to the bottom, the stack frame is: ++ # ++ # 0(r1) - Back chain pointer ++ # 8(r1) - CR save area ++ # 16(r1) - LR save area (for |func|) ++ # 24(r1) - TOC pointer save area ++ # 32(r1) - Saved copy of |state| ++ # 40(r1) - Padding ++ # 48(r1) - Vector register save area (v20-v31, 12 registers) ++ # 240(r1) - General register save area (r14-r31, 18 registers) ++ # 384(r1) - Floating point register save area (f14-f31, 18 registers) ++ # ++ # Note the layouts of the register save areas and CallerState match. ++ # ++ # In the ELFv2 ABI, the parameter save area is optional if the function ++ # is non-variadic and all parameters fit in registers. We only support ++ # such functions, so we omit it to test that |func| does not rely on it. ++ stdu r1, -528(r1) ++ ++ mfcr r0 ++ std r0, 8(r1) # Save CR ++ std r2, 24(r1) # Save TOC ++ std $state, 32(r1) # Save |state| ++____ ++# Save registers to the stack. 
++store_regs("r1", 48); ++# Load registers from the caller. ++load_regs($state, 0); ++$code .= <<____; ++ # Load CR from |state|. ++ ld r0, 480($state) ++ mtcr r0 ++ ++ # Move parameters into temporary registers so they are not clobbered. ++ addi r11, $argv, -8 # Adjust for ldu below ++ mr r12, $func ++ ++ # Load parameters into registers. ++ cmpdi $argc, 0 ++ beq .Largs_done ++ mtctr $argc ++ ldu r3, 8(r11) ++ bdz .Largs_done ++ ldu r4, 8(r11) ++ bdz .Largs_done ++ ldu r5, 8(r11) ++ bdz .Largs_done ++ ldu r6, 8(r11) ++ bdz .Largs_done ++ ldu r7, 8(r11) ++ bdz .Largs_done ++ ldu r8, 8(r11) ++ bdz .Largs_done ++ ldu r9, 8(r11) ++ bdz .Largs_done ++ ldu r10, 8(r11) ++ ++.Largs_done: ++ li r2, 0 # Clear TOC to test |func|'s global entry point ++ mtctr r12 ++ bctrl ++ ld r2, 24(r1) # Restore TOC ++ ++ ld $state, 32(r1) # Reload |state| ++____ ++# Output resulting registers to the caller. ++store_regs($state, 0); ++# Restore registers from the stack. ++load_regs("r1", 48); ++$code .= <<____; ++ mfcr r0 ++ std r0, 480($state) # Output CR to caller ++ ld r0, 8(r1) ++ mtcrf 0b00111000, r0 # Restore CR2-CR4 ++ addi r1, r1, 528 ++ ld r0, 16(r1) # Restore LR ++ mtlr r0 ++ blr ++.size abi_test_trampoline,.-abi_test_trampoline ++____ ++ ++# abi_test_clobber_* clobbers the corresponding register. These are used to test ++# the ABI-testing framework. ++foreach (0..31) { ++ # r1 is the stack pointer. r13 is the thread pointer. ++ next if ($_ == 1 || $_ == 13); ++ $code .= <<____; ++.globl abi_test_clobber_r$_ ++.align 5 ++abi_test_clobber_r$_: ++ li r$_, 0 ++ blr ++.size abi_test_clobber_r$_,.-abi_test_clobber_r$_ ++____ ++} ++ ++foreach (0..31) { ++ $code .= <<____; ++.globl abi_test_clobber_f$_ ++.align 4 ++abi_test_clobber_f$_: ++ li r0, 0 ++ # Use the red zone. 
++ std r0, -8(r1) ++ lfd f$_, -8(r1) ++ blr ++.size abi_test_clobber_f$_,.-abi_test_clobber_f$_ ++____ ++} ++ ++foreach (0..31) { ++ $code .= <<____; ++.globl abi_test_clobber_v$_ ++.align 4 ++abi_test_clobber_v$_: ++ vxor v$_, v$_, v$_ ++ blr ++.size abi_test_clobber_v$_,.-abi_test_clobber_v$_ ++____ ++} ++ ++foreach (0..7) { ++ # PPC orders CR fields in big-endian, so the mask is reversed from what one ++ # would expect. ++ my $mask = 1 << (7 - $_); ++ $code .= <<____; ++.globl abi_test_clobber_cr$_ ++.align 4 ++abi_test_clobber_cr$_: ++ # Flip the bits on cr$_ rather than setting to zero. With a four-bit ++ # register, zeroing it will do nothing 1 in 16 times. ++ mfcr r0 ++ not r0, r0 ++ mtcrf $mask, r0 ++ blr ++.size abi_test_clobber_cr$_,.-abi_test_clobber_cr$_ ++____ ++} ++ ++$code .= <<____; ++.globl abi_test_clobber_ctr ++.align 4 ++abi_test_clobber_ctr: ++ li r0, 0 ++ mtctr r0 ++ blr ++.size abi_test_clobber_ctr,.-abi_test_clobber_ctr ++ ++.globl abi_test_clobber_lr ++.align 4 ++abi_test_clobber_lr: ++ mflr r0 ++ mtctr r0 ++ li r0, 0 ++ mtlr r0 ++ bctr ++.size abi_test_clobber_lr,.-abi_test_clobber_lr ++ ++____ ++ ++print $code; ++close STDOUT or die "error closing STDOUT: $!"; +Index: chromium-122.0.6261.57/third_party/boringssl/src/include/openssl/target.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/include/openssl/target.h ++++ chromium-122.0.6261.57/third_party/boringssl/src/include/openssl/target.h +@@ -34,6 +34,9 @@ + #elif defined(__ARMEL__) || defined(_M_ARM) + #define OPENSSL_32_BIT + #define OPENSSL_ARM ++#elif (defined(__PPC64__) || defined(__powerpc64__)) && defined(_LITTLE_ENDIAN) ++#define OPENSSL_64_BIT ++#define OPENSSL_PPC64LE + #elif defined(__MIPSEL__) && !defined(__LP64__) + #define OPENSSL_32_BIT + #define OPENSSL_MIPS +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/acvp/modulewrapper/main.cc 
+=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/util/fipstools/acvp/modulewrapper/main.cc ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/acvp/modulewrapper/main.cc +@@ -37,6 +37,8 @@ int main(int argc, char **argv) { + puts("ARM (32-bit)"); + #elif defined(OPENSSL_AARCH64) + puts("aarch64 (64-bit)"); ++#elif defined(OPENSSL_PPC64LE) ++ puts("PPC64LE (64-bit)"); + #else + #error "FIPS build not supported on this architecture" + #endif +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/delocate.go +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/util/fipstools/delocate/delocate.go ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/delocate.go +@@ -54,7 +54,8 @@ type stringWriter interface { + type processorType int + + const ( +- x86_64 processorType = iota + 1 ++ ppc64le processorType = iota + 1 ++ x86_64 + aarch64 + ) + +@@ -67,6 +68,8 @@ type delocation struct { + + // symbols is the set of symbols defined in the module. + symbols map[string]struct{} ++ // localEntrySymbols is the set of symbols with .localentry directives. ++ localEntrySymbols map[string]struct{} + // redirectors maps from out-call symbol name to the name of a + // redirector function for that symbol. E.g. “memcpy” -> + // “bcm_redirector_memcpy”. +@@ -75,6 +78,9 @@ type delocation struct { + // should be used to reference it. E.g. “P384_data_storage” -> + // “P384_data_storage”. + bssAccessorsNeeded map[string]string ++ // tocLoaders is a set of symbol names for which TOC helper functions ++ // are required. (ppc64le only.) ++ tocLoaders map[string]struct{} + // gotExternalsNeeded is a set of symbol names for which we need + // “delta” symbols: symbols that contain the offset from their location + // to the memory in question. 
+@@ -151,6 +157,8 @@ func (d *delocation) processInput(input + switch d.processor { + case x86_64: + statement, err = d.processIntelInstruction(statement, node.up) ++ case ppc64le: ++ statement, err = d.processPPCInstruction(statement, node.up) + case aarch64: + statement, err = d.processAarch64Instruction(statement, node.up) + default: +@@ -247,7 +255,7 @@ func (d *delocation) processDirective(st + d.writeNode(statement) + break + +- case ".debug", ".note": ++ case ".debug", ".note", ".toc": + d.writeNode(statement) + break + +@@ -336,6 +344,10 @@ func (d *delocation) processLabelContain + d.output.WriteString("\t" + name + "\t" + strings.Join(args, ", ") + "\n") + } + ++ if name == ".localentry" { ++ d.output.WriteString(localEntryName(args[0]) + ":\n") ++ } ++ + return statement, nil + } + +@@ -646,6 +658,191 @@ func (d *delocation) processAarch64Instr + return statement, nil + } + ++/* ppc64le ++ ++[PABI]: “64-Bit ELF V2 ABI Specification. Power Architecture.” March 21st, ++ 2017 ++ ++(Also useful is “Power ISA Version 2.07 B”. Note that version three of that ++document is /not/ good as that's POWER9 specific.) ++ ++ppc64le doesn't have IP-relative addressing and does a lot to work around this. ++Rather than reference a PLT and GOT direction, it has a single structure called ++the TOC (Table Of Contents). Within the TOC is the contents of .rodata, .data, ++.got, .plt, .bss, etc sections [PABI;3.3]. ++ ++A pointer to the TOC is maintained in r2 and the following pattern is used to ++load the address of an element into a register: ++ ++ addis
, 2, foo@toc@ha ++ addi
,
, foo@toc@l ++ ++The “addis” instruction shifts a signed constant left 16 bits and adds the ++result to its second argument, saving the result in the first argument. The ++“addi” instruction does the same, but without shifting. Thus the “@toc@ha" ++suffix on a symbol means “the top 16 bits of the TOC offset” and “@toc@l” means ++“the bottom 16 bits of the offset”. However, note that both values are signed, ++thus offsets in the top half of a 64KB chunk will have an @ha value that's one ++greater than expected and a negative @l value. ++ ++The TOC is specific to a “module” (basically an executable or shared object). ++This means that there's not a single TOC in a process and that r2 needs to ++change as control moves between modules. Thus functions have two entry points: ++the “global” entry point and the “local” entry point. Jumps from within the ++same module can use the local entry while jumps from other modules must use the ++global entry. The global entry establishes the correct value of r2 before ++running the function and the local entry skips that code. ++ ++The global entry point for a function is defined by its label. The local entry ++is a power-of-two number of bytes from the global entry, set by the ++“.localentry” directive. (ppc64le instructions are always 32 bits, so an offset ++of 1 or 2 bytes is treated as an offset of zero.) ++ ++In order to help the global entry code set r2 to point to the local TOC, r12 is ++set to the address of the global entry point when called [PABI;2.2.1.1]. Thus ++the global entry will typically use an addis+addi pair to add a known offset to ++r12 and store it in r2. For example: ++ ++foo: ++ addis 2, 12, .TOC. - foo@ha ++ addi 2, 2, .TOC. - foo@l ++ ++(It's worth noting that the '@' operator binds very loosely, so the 3rd ++arguments parse as (.TOC. - foo)@ha and (.TOC. - foo)@l.) ++ ++When calling a function, the compiler doesn't know whether that function is in ++the same module or not. 
Thus it doesn't know whether r12 needs to be set nor ++whether r2 will be clobbered on return. Rather than always assume the worst, ++the linker fixes stuff up once it knows that a call is going out of module: ++ ++Firstly, calling, say, memcpy (which we assume to be in a different module) ++won't actually jump directly to memcpy, or even a PLT resolution function. ++It'll call a synthesised function that: ++ a) saves r2 in the caller's stack frame ++ b) loads the address of memcpy@PLT into r12 ++ c) jumps to r12. ++ ++As this synthesised function loads memcpy@PLT, a call to memcpy from the ++compiled code just references “memcpy” directly, not “memcpy@PLT”. ++ ++Since it jumps directly to memcpy@PLT, it can't restore r2 on return. Thus ++calls must be followed by a nop. If the call ends up going out-of-module, the ++linker will rewrite that nop to load r2 from the stack. ++ ++Speaking of the stack, the stack pointer is kept in r1 and there's a 288-byte ++red-zone. The format of the stack frame is defined [PABI;2.2.2] and must be ++followed as called functions will write into their parent's stack frame. For ++example, the synthesised out-of-module trampolines will save r2 24 bytes into ++the caller's frame and all non-leaf functions save the return address 16 bytes ++into the caller's frame. ++ ++A final point worth noting: some RISC ISAs have r0 wired to zero: all reads ++result in zero and all writes are discarded. POWER does something a little like ++that, but r0 is only special in certain argument positions for certain ++instructions. You just have to read the manual to know which they are. ++ ++ ++Delocation is easier than Intel because there's just TOC references, but it's ++also harder because there's no IP-relative addressing. ++ ++Jumps are IP-relative however, and have a 24-bit immediate value. So we can ++jump to functions that set a register to the needed value. (r3 is the ++return-value register and so that's what is generally used here.) 
*/ ++ ++// isPPC64LEAPair recognises an addis+addi pair that's adding the offset of ++// source to relative and writing the result to target. ++func (d *delocation) isPPC64LEAPair(statement *node32) (target, source, relative string, ok bool) { ++ instruction := skipWS(statement.up).up ++ assertNodeType(instruction, ruleInstructionName) ++ name1 := d.contents(instruction) ++ args1 := instructionArgs(instruction.next) ++ ++ statement = statement.next ++ instruction = skipWS(statement.up).up ++ assertNodeType(instruction, ruleInstructionName) ++ name2 := d.contents(instruction) ++ args2 := instructionArgs(instruction.next) ++ ++ if name1 != "addis" || ++ len(args1) != 3 || ++ name2 != "addi" || ++ len(args2) != 3 { ++ return "", "", "", false ++ } ++ ++ target = d.contents(args1[0]) ++ relative = d.contents(args1[1]) ++ source1 := d.contents(args1[2]) ++ source2 := d.contents(args2[2]) ++ ++ if !strings.HasSuffix(source1, "@ha") || ++ !strings.HasSuffix(source2, "@l") || ++ source1[:len(source1)-3] != source2[:len(source2)-2] || ++ d.contents(args2[0]) != target || ++ d.contents(args2[1]) != target { ++ return "", "", "", false ++ } ++ ++ source = source1[:len(source1)-3] ++ ok = true ++ return ++} ++ ++// establishTOC writes the global entry prelude for a function. The standard ++// prelude involves relocations so this version moves the relocation outside ++// the integrity-checked area. ++func establishTOC(w stringWriter) { ++ w.WriteString("999:\n") ++ w.WriteString("\taddis 2, 12, .LBORINGSSL_external_toc-999b@ha\n") ++ w.WriteString("\taddi 2, 2, .LBORINGSSL_external_toc-999b@l\n") ++ w.WriteString("\tld 12, 0(2)\n") ++ w.WriteString("\tadd 2, 2, 12\n") ++} ++ ++// loadTOCFuncName returns the name of a synthesized function that sets r3 to ++// the value of “symbol+offset”. 
++func loadTOCFuncName(symbol, offset string) string { ++ symbol = strings.Replace(symbol, ".", "_dot_", -1) ++ ret := ".Lbcm_loadtoc_" + symbol ++ if len(offset) != 0 { ++ offset = strings.Replace(offset, "+", "_plus_", -1) ++ offset = strings.Replace(offset, "-", "_minus_", -1) ++ ret += "_" + offset ++ } ++ return ret ++} ++ ++func (d *delocation) loadFromTOC(w stringWriter, symbol, offset, dest string) wrapperFunc { ++ d.tocLoaders[symbol+"\x00"+offset] = struct{}{} ++ ++ return func(k func()) { ++ w.WriteString("\taddi 1, 1, -288\n") // Clear the red zone. ++ w.WriteString("\tmflr " + dest + "\n") // Stash the link register. ++ w.WriteString("\tstd " + dest + ", -8(1)\n") ++ // The TOC loader will use r3, so stash it if necessary. ++ if dest != "3" { ++ w.WriteString("\tstd 3, -16(1)\n") ++ } ++ ++ // Because loadTOCFuncName returns a “.L” name, we don't need a ++ // nop after this call. ++ w.WriteString("\tbl " + loadTOCFuncName(symbol, offset) + "\n") ++ ++ // Cycle registers around. We need r3 -> destReg, -8(1) -> ++ // lr and, optionally, -16(1) -> r3. 
++ w.WriteString("\tstd 3, -24(1)\n") ++ w.WriteString("\tld 3, -8(1)\n") ++ w.WriteString("\tmtlr 3\n") ++ w.WriteString("\tld " + dest + ", -24(1)\n") ++ if dest != "3" { ++ w.WriteString("\tld 3, -16(1)\n") ++ } ++ w.WriteString("\taddi 1, 1, 288\n") ++ ++ k() ++ } ++} ++ + func (d *delocation) gatherOffsets(symRef *node32, offsets string) (*node32, string) { + for symRef != nil && symRef.pegRule == ruleOffset { + offset := d.contents(symRef) +@@ -700,6 +897,215 @@ func (d *delocation) parseMemRef(memRef + return + } + ++func (d *delocation) processPPCInstruction(statement, instruction *node32) (*node32, error) { ++ assertNodeType(instruction, ruleInstructionName) ++ instructionName := d.contents(instruction) ++ isBranch := instructionName[0] == 'b' ++ ++ argNodes := instructionArgs(instruction.next) ++ ++ var wrappers wrapperStack ++ var args []string ++ changed := false ++ ++Args: ++ for i, arg := range argNodes { ++ fullArg := arg ++ isIndirect := false ++ ++ if arg.pegRule == ruleIndirectionIndicator { ++ arg = arg.next ++ isIndirect = true ++ } ++ ++ switch arg.pegRule { ++ case ruleRegisterOrConstant, ruleLocalLabelRef: ++ args = append(args, d.contents(fullArg)) ++ ++ case ruleTOCRefLow: ++ return nil, errors.New("Found low TOC reference outside preamble pattern") ++ ++ case ruleTOCRefHigh: ++ target, _, relative, ok := d.isPPC64LEAPair(statement) ++ if !ok { ++ return nil, errors.New("Found high TOC reference outside preamble pattern") ++ } ++ ++ if relative != "12" { ++ return nil, fmt.Errorf("preamble is relative to %q, not r12", relative) ++ } ++ ++ if target != "2" { ++ return nil, fmt.Errorf("preamble is setting %q, not r2", target) ++ } ++ ++ statement = statement.next ++ establishTOC(d.output) ++ instructionName = "" ++ changed = true ++ break Args ++ ++ case ruleMemoryRef: ++ symbol, offset, section, didChange, symbolIsLocal, memRef := d.parseMemRef(arg.up) ++ changed = didChange ++ ++ if len(symbol) > 0 { ++ if _, localEntrySymbol := 
d.localEntrySymbols[symbol]; localEntrySymbol && isBranch { ++ symbol = localEntryName(symbol) ++ changed = true ++ } else if _, knownSymbol := d.symbols[symbol]; knownSymbol { ++ symbol = localTargetName(symbol) ++ changed = true ++ } else if !symbolIsLocal && !isSynthesized(symbol) && len(section) == 0 { ++ changed = true ++ d.redirectors[symbol] = redirectorName(symbol) ++ symbol = redirectorName(symbol) ++ // TODO(davidben): This should sanity-check the next ++ // instruction is a nop and ideally remove it. ++ wrappers = append(wrappers, func(k func()) { ++ k() ++ // Like the linker's PLT stubs, redirector functions ++ // expect callers to restore r2. ++ d.output.WriteString("\tld 2, 24(1)\n") ++ }) ++ } ++ } ++ ++ switch section { ++ case "": ++ ++ case "tls": ++ // This section identifier just tells the ++ // assembler to use r13, the pointer to the ++ // thread-local data [PABI;3.7.3.3]. ++ ++ case "toc@ha": ++ // Delete toc@ha instructions. Per ++ // [PABI;3.6.3], the linker is allowed to erase ++ // toc@ha instructions. We take advantage of ++ // this by unconditionally erasing the toc@ha ++ // instructions and doing the full lookup when ++ // processing toc@l. ++ // ++ // Note that any offset here applies before @ha ++ // and @l. That is, 42+foo@toc@ha is ++ // #ha(42+foo-.TOC.), not 42+#ha(foo-.TOC.). Any ++ // corresponding toc@l references are required ++ // by the ABI to have the same offset. The ++ // offset will be incorporated in full when ++ // those are processed. ++ if instructionName != "addis" || len(argNodes) != 3 || i != 2 || args[1] != "2" { ++ return nil, errors.New("can't process toc@ha reference") ++ } ++ changed = true ++ instructionName = "" ++ break Args ++ ++ case "toc@l": ++ // Per [PAB;3.6.3], this instruction must take ++ // as input a register which was the output of ++ // a toc@ha computation and compute the actual ++ // address of some symbol. 
The toc@ha ++ // computation was elided, so we ignore that ++ // input register and compute the address ++ // directly. ++ changed = true ++ ++ // For all supported toc@l instructions, the ++ // destination register is the first argument. ++ destReg := args[0] ++ ++ wrappers = append(wrappers, d.loadFromTOC(d.output, symbol, offset, destReg)) ++ switch instructionName { ++ case "addi": ++ // The original instruction was: ++ // addi destReg, tocHaReg, offset+symbol@toc@l ++ instructionName = "" ++ ++ case "ld", "lhz", "lwz": ++ // The original instruction was: ++ // l?? destReg, offset+symbol@toc@l(tocHaReg) ++ // ++ // We transform that into the ++ // equivalent dereference of destReg: ++ // l?? destReg, 0(destReg) ++ origInstructionName := instructionName ++ instructionName = "" ++ ++ assertNodeType(memRef, ruleBaseIndexScale) ++ assertNodeType(memRef.up, ruleRegisterOrConstant) ++ if memRef.next != nil || memRef.up.next != nil { ++ return nil, errors.New("expected single register in BaseIndexScale for ld argument") ++ } ++ ++ baseReg := destReg ++ if baseReg == "0" { ++ // Register zero is special as the base register for a load. ++ // Avoid it by spilling and using r3 instead. ++ baseReg = "3" ++ wrappers = append(wrappers, func(k func()) { ++ d.output.WriteString("\taddi 1, 1, -288\n") // Clear the red zone. ++ d.output.WriteString("\tstd " + baseReg + ", -8(1)\n") ++ d.output.WriteString("\tmr " + baseReg + ", " + destReg + "\n") ++ k() ++ d.output.WriteString("\tld " + baseReg + ", -8(1)\n") ++ d.output.WriteString("\taddi 1, 1, 288\n") // Clear the red zone. 
++ }) ++ } ++ ++ wrappers = append(wrappers, func(k func()) { ++ d.output.WriteString("\t" + origInstructionName + " " + destReg + ", 0(" + baseReg + ")\n") ++ }) ++ default: ++ return nil, fmt.Errorf("can't process TOC argument to %q", instructionName) ++ } ++ ++ default: ++ return nil, fmt.Errorf("Unknown section type %q", section) ++ } ++ ++ argStr := "" ++ if isIndirect { ++ argStr += "*" ++ } ++ argStr += symbol ++ if len(offset) > 0 { ++ argStr += offset ++ } ++ if len(section) > 0 { ++ argStr += "@" ++ argStr += section ++ } ++ ++ for ; memRef != nil; memRef = memRef.next { ++ argStr += d.contents(memRef) ++ } ++ ++ args = append(args, argStr) ++ ++ default: ++ panic(fmt.Sprintf("unknown instruction argument type %q", rul3s[arg.pegRule])) ++ } ++ } ++ ++ if changed { ++ d.writeCommentedNode(statement) ++ ++ var replacement string ++ if len(instructionName) > 0 { ++ replacement = "\t" + instructionName + "\t" + strings.Join(args, ", ") + "\n" ++ } ++ ++ wrappers.do(func() { ++ d.output.WriteString(replacement) ++ }) ++ } else { ++ d.writeNode(statement) ++ } ++ ++ return statement, nil ++} ++ + /* Intel */ + + type instructionType int +@@ -1323,6 +1729,8 @@ func writeAarch64Function(w stringWriter + func transform(w stringWriter, inputs []inputFile) error { + // symbols contains all defined symbols. + symbols := make(map[string]struct{}) ++ // localEntrySymbols contains all symbols with a .localentry directive. ++ localEntrySymbols := make(map[string]struct{}) + // fileNumbers is the set of IDs seen in .file directives. 
+ fileNumbers := make(map[int]struct{}) + // maxObservedFileNumber contains the largest seen file number in a +@@ -1346,6 +1754,25 @@ func transform(w stringWriter, inputs [] + }, ruleStatement, ruleLabel, ruleSymbolName) + + forEachPath(input.ast.up, func(node *node32) { ++ node = node.up ++ assertNodeType(node, ruleLabelContainingDirectiveName) ++ directive := input.contents[node.begin:node.end] ++ if directive != ".localentry" { ++ return ++ } ++ // Extract the first argument. ++ node = skipWS(node.next) ++ assertNodeType(node, ruleSymbolArgs) ++ node = node.up ++ assertNodeType(node, ruleSymbolArg) ++ symbol := input.contents[node.begin:node.end] ++ if _, ok := localEntrySymbols[symbol]; ok { ++ panic(fmt.Sprintf("Duplicate .localentry directive found: %q in %q", symbol, input.path)) ++ } ++ localEntrySymbols[symbol] = struct{}{} ++ }, ruleStatement, ruleLabelContainingDirective) ++ ++ forEachPath(input.ast.up, func(node *node32) { + assertNodeType(node, ruleLocationDirective) + directive := input.contents[node.begin:node.end] + if !strings.HasPrefix(directive, ".file") { +@@ -1393,11 +1820,13 @@ func transform(w stringWriter, inputs [] + + d := &delocation{ + symbols: symbols, ++ localEntrySymbols: localEntrySymbols, + processor: processor, + commentIndicator: commentIndicator, + output: w, + redirectors: make(map[string]string), + bssAccessorsNeeded: make(map[string]string), ++ tocLoaders: make(map[string]struct{}), + gotExternalsNeeded: make(map[string]struct{}), + gotOffsetsNeeded: make(map[string]struct{}), + gotOffOffsetsNeeded: make(map[string]struct{}), +@@ -1432,6 +1861,22 @@ func transform(w stringWriter, inputs [] + for _, name := range redirectorNames { + redirector := d.redirectors[name] + switch d.processor { ++ case ppc64le: ++ w.WriteString(".section \".toc\", \"aw\"\n") ++ w.WriteString(".Lredirector_toc_" + name + ":\n") ++ w.WriteString(".quad " + name + "\n") ++ w.WriteString(".text\n") ++ w.WriteString(".type " + redirector + ", 
@function\n") ++ w.WriteString(redirector + ":\n") ++ // |name| will clobber r2, so save it. This is matched by a restore in ++ // redirector calls. ++ w.WriteString("\tstd 2, 24(1)\n") ++ // Load and call |name|'s global entry point. ++ w.WriteString("\taddis 12, 2, .Lredirector_toc_" + name + "@toc@ha\n") ++ w.WriteString("\tld 12, .Lredirector_toc_" + name + "@toc@l(12)\n") ++ w.WriteString("\tmtctr 12\n") ++ w.WriteString("\tbctr\n") ++ + case aarch64: + writeAarch64Function(w, redirector, func(w stringWriter) { + w.WriteString("\tb " + name + "\n") +@@ -1456,6 +1901,13 @@ func transform(w stringWriter, inputs [] + target := d.bssAccessorsNeeded[name] + + switch d.processor { ++ case ppc64le: ++ w.WriteString(".type " + funcName + ", @function\n") ++ w.WriteString(funcName + ":\n") ++ w.WriteString("\taddis 3, 2, " + target + "@toc@ha\n") ++ w.WriteString("\taddi 3, 3, " + target + "@toc@l\n") ++ w.WriteString("\tblr\n") ++ + case x86_64: + w.WriteString(".type " + funcName + ", @function\n") + w.WriteString(funcName + ":\n") +@@ -1471,6 +1923,26 @@ func transform(w stringWriter, inputs [] + } + + switch d.processor { ++ case ppc64le: ++ loadTOCNames := sortedSet(d.tocLoaders) ++ for _, symbolAndOffset := range loadTOCNames { ++ parts := strings.SplitN(symbolAndOffset, "\x00", 2) ++ symbol, offset := parts[0], parts[1] ++ ++ funcName := loadTOCFuncName(symbol, offset) ++ ref := symbol + offset ++ ++ w.WriteString(".type " + funcName[2:] + ", @function\n") ++ w.WriteString(funcName[2:] + ":\n") ++ w.WriteString(funcName + ":\n") ++ w.WriteString("\taddis 3, 2, " + ref + "@toc@ha\n") ++ w.WriteString("\taddi 3, 3, " + ref + "@toc@l\n") ++ w.WriteString("\tblr\n") ++ } ++ ++ w.WriteString(".LBORINGSSL_external_toc:\n") ++ w.WriteString(".quad .TOC.-.LBORINGSSL_external_toc\n") ++ + case aarch64: + externalNames := sortedSet(d.gotExternalsNeeded) + for _, symbol := range externalNames { +@@ -1781,6 +2253,10 @@ func localTargetName(name string) string + return ".L" 
+ name + "_local_target" + } + ++func localEntryName(name string) string { ++ return ".L" + name + "_local_entry" ++} ++ + func isSynthesized(symbol string) bool { + return strings.HasSuffix(symbol, "_bss_get") || + symbol == "OPENSSL_ia32cap_get" || +@@ -1836,6 +2312,8 @@ func detectProcessor(input inputFile) pr + switch instructionName { + case "movq", "call", "leaq": + return x86_64 ++ case "addis", "addi", "mflr": ++ return ppc64le + case "str", "bl", "ldr", "st1": + return aarch64 + } +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/delocate.peg +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/util/fipstools/delocate/delocate.peg ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/delocate.peg +@@ -12,7 +12,7 @@ + # OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +-# This is a rough parser for x86-64 and aarch64 assembly designed to work with ++# This is a rough parser for x86-64 and ppc64le assembly designed to work with + # https://github.com/pointlander/peg. delocate.go has a go:generate line for + # rebuilding delocate.peg.go from this file. 
+ +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/delocate_test.go +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/util/fipstools/delocate/delocate_test.go ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/delocate_test.go +@@ -39,6 +39,11 @@ func (test *delocateTest) Path(file stri + + var delocateTests = []delocateTest{ + {"generic-FileDirectives", []string{"in.s"}, "out.s"}, ++ {"ppc64le-GlobalEntry", []string{"in.s"}, "out.s"}, ++ {"ppc64le-LoadToR0", []string{"in.s"}, "out.s"}, ++ {"ppc64le-Sample2", []string{"in.s"}, "out.s"}, ++ {"ppc64le-Sample", []string{"in.s"}, "out.s"}, ++ {"ppc64le-TOCWithOffset", []string{"in.s"}, "out.s"}, + {"x86_64-Basic", []string{"in.s"}, "out.s"}, + {"x86_64-BSS", []string{"in.s"}, "out.s"}, + {"x86_64-GOTRewrite", []string{"in.s"}, "out.s"}, +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-GlobalEntry/in.s +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-GlobalEntry/in.s +@@ -0,0 +1,9 @@ ++ .text ++foo: ++.LCF0: ++0: ++ addis 2,12,.TOC.-.LCF0@ha ++ addi 2,2,.TOC.-.LCF0@l ++ .localentry foo,.-foo ++.LVL0: ++ bl +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-GlobalEntry/out.s +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-GlobalEntry/out.s +@@ -0,0 +1,62 @@ ++.text ++.file 1 "inserted_by_delocate.c" ++.loc 1 1 0 ++BORINGSSL_bcm_text_start: ++ .text ++.Lfoo_local_target: ++foo: ++.LCF0: ++ ++0: ++ ++999: ++ addis 2, 12, .LBORINGSSL_external_toc-999b@ha ++ addi 2, 2, .LBORINGSSL_external_toc-999b@l ++ ld 12, 0(2) ++ add 2, 2, 12 ++# WAS 
addi 2,2,.TOC.-.LCF0@l ++ .localentry foo,.-foo ++.Lfoo_local_entry: ++.LVL0: ++ ++ bl ++.text ++.loc 1 2 0 ++BORINGSSL_bcm_text_end: ++.LBORINGSSL_external_toc: ++.quad .TOC.-.LBORINGSSL_external_toc ++.type BORINGSSL_bcm_text_hash, @object ++.size BORINGSSL_bcm_text_hash, 32 ++BORINGSSL_bcm_text_hash: ++.byte 0xae ++.byte 0x2c ++.byte 0xea ++.byte 0x2a ++.byte 0xbd ++.byte 0xa6 ++.byte 0xf3 ++.byte 0xec ++.byte 0x97 ++.byte 0x7f ++.byte 0x9b ++.byte 0xf6 ++.byte 0x94 ++.byte 0x9a ++.byte 0xfc ++.byte 0x83 ++.byte 0x68 ++.byte 0x27 ++.byte 0xcb ++.byte 0xa0 ++.byte 0xa0 ++.byte 0x9f ++.byte 0x6b ++.byte 0x6f ++.byte 0xde ++.byte 0x52 ++.byte 0xcd ++.byte 0xe2 ++.byte 0xcd ++.byte 0xff ++.byte 0x31 ++.byte 0x80 +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-LoadToR0/in.s +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-LoadToR0/in.s +@@ -0,0 +1,4 @@ ++ .text ++foo: ++ addis 22,2,bar@toc@ha ++ ld 0,bar@toc@l(22) +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-LoadToR0/out.s +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-LoadToR0/out.s +@@ -0,0 +1,72 @@ ++.text ++.file 1 "inserted_by_delocate.c" ++.loc 1 1 0 ++BORINGSSL_bcm_text_start: ++ .text ++.Lfoo_local_target: ++foo: ++# WAS addis 22,2,bar@toc@ha ++# WAS ld 0,bar@toc@l(22) ++ addi 1, 1, -288 ++ mflr 0 ++ std 0, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc_bar ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 0, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ addi 1, 1, -288 ++ std 3, -8(1) ++ mr 3, 0 ++ ld 0, 0(3) ++ ld 3, -8(1) ++ addi 1, 1, 288 ++.text ++.loc 1 2 0 ++BORINGSSL_bcm_text_end: ++.type bcm_loadtoc_bar, @function ++bcm_loadtoc_bar: 
++.Lbcm_loadtoc_bar: ++ addis 3, 2, bar@toc@ha ++ addi 3, 3, bar@toc@l ++ blr ++.LBORINGSSL_external_toc: ++.quad .TOC.-.LBORINGSSL_external_toc ++.type BORINGSSL_bcm_text_hash, @object ++.size BORINGSSL_bcm_text_hash, 32 ++BORINGSSL_bcm_text_hash: ++.byte 0xae ++.byte 0x2c ++.byte 0xea ++.byte 0x2a ++.byte 0xbd ++.byte 0xa6 ++.byte 0xf3 ++.byte 0xec ++.byte 0x97 ++.byte 0x7f ++.byte 0x9b ++.byte 0xf6 ++.byte 0x94 ++.byte 0x9a ++.byte 0xfc ++.byte 0x83 ++.byte 0x68 ++.byte 0x27 ++.byte 0xcb ++.byte 0xa0 ++.byte 0xa0 ++.byte 0x9f ++.byte 0x6b ++.byte 0x6f ++.byte 0xde ++.byte 0x52 ++.byte 0xcd ++.byte 0xe2 ++.byte 0xcd ++.byte 0xff ++.byte 0x31 ++.byte 0x80 +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-Sample/in.s +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-Sample/in.s +@@ -0,0 +1,161 @@ ++ .file "foo.c" ++ .abiversion 2 ++ .section ".toc","aw" ++ .section ".text" ++ .section .rodata ++ .align 3 ++ .type kString, @object ++ .size kString, 12 ++kString: ++ .string "hello world" ++ .globl kExportedString ++ .align 3 ++ .type kExportedString, @object ++ .size kExportedString, 26 ++kExportedString: ++ .string "hello world, more visibly" ++ .align 2 ++ .type kGiantArray, @object ++ .size kGiantArray, 400000 ++kGiantArray: ++ .long 1 ++ .long 0 ++ .zero 399992 ++ .lcomm bss,20,4 ++ .type bss, @object ++ .align 3 ++.LC1: ++ .string "kString is %p\n" ++ .align 3 ++.LC2: ++ .string "kExportedString is %p\n" ++ .align 3 ++.LC4: ++ .string "function is %p\n" ++ .align 3 ++.LC5: ++ .string "exported_function is %p\n" ++ .align 3 ++.LC7: ++ .string "&kString[5] is %p\n" ++ .align 3 ++.LC9: ++ .string "&kGiantArray[0x12345] is %p\n" ++ .section ".toc","aw" ++.LC0: ++ .quad stderr ++.LC3: ++ .quad kExportedString ++.LC6: ++ .quad exported_function ++.LC8: ++ .quad kString+5 ++.LC10: ++ .quad 
kGiantArray+298260 ++ .section ".text" ++ .align 2 ++ .type function, @function ++function: ++0: addis 2,12,.TOC.-0b@ha ++ addi 2,2,.TOC.-0b@l ++ .localentry function,.-function ++ mflr 0 ++ std 0,16(1) ++ std 31,-8(1) ++ stdu 1,-112(1) ++ mr 31,1 ++ addis 10,2,.LC0@toc@ha ++ ld 9,.LC0@toc@l(10) ++ ld 9,0(9) ++ mr 3,9 ++ addis 4,2,.LC1@toc@ha ++ addi 4,4,.LC1@toc@l ++ addis 5,2,kString@toc@ha ++ addi 5,5,kString@toc@l ++ bl fprintf ++ nop ++ addis 10,2,.LC0@toc@ha ++ ld 9,.LC0@toc@l(10) ++ ld 9,0(9) ++ mr 3,9 ++ addis 4,2,.LC2@toc@ha ++ addi 4,4,.LC2@toc@l ++ addis 9,2,.LC3@toc@ha ++ ld 5,.LC3@toc@l(9) ++ bl fprintf ++ nop ++ addis 10,2,.LC0@toc@ha ++ ld 9,.LC0@toc@l(10) ++ ld 9,0(9) ++ mr 3,9 ++ addis 4,2,.LC4@toc@ha ++ addi 4,4,.LC4@toc@l ++ addis 5,2,function@toc@ha ++ addi 5,5,function@toc@l ++ bl fprintf ++ nop ++ addis 10,2,.LC0@toc@ha ++ ld 9,.LC0@toc@l(10) ++ ld 9,0(9) ++ mr 3,9 ++ addis 4,2,.LC5@toc@ha ++ addi 4,4,.LC5@toc@l ++ addis 9,2,.LC6@toc@ha ++ ld 5,.LC6@toc@l(9) ++ bl fprintf ++ nop ++ addis 10,2,.LC0@toc@ha ++ ld 9,.LC0@toc@l(10) ++ ld 9,0(9) ++ mr 3,9 ++ addis 4,2,.LC7@toc@ha ++ addi 4,4,.LC7@toc@l ++ addis 9,2,.LC8@toc@ha ++ ld 5,.LC8@toc@l(9) ++ bl fprintf ++ nop ++ addis 10,2,.LC0@toc@ha ++ ld 9,.LC0@toc@l(10) ++ ld 9,0(9) ++ mr 3,9 ++ addis 4,2,.LC9@toc@ha ++ addi 4,4,.LC9@toc@l ++ addis 9,2,.LC10@toc@ha ++ ld 5,.LC10@toc@l(9) ++ bl fprintf ++ nop ++ bl exported_function ++ nop ++ mr 3,9 ++ addi 1,31,112 ++ ld 0,16(1) ++ mtlr 0 ++ ld 31,-8(1) ++ blr ++ .long 0 ++ .byte 0,0,0,1,128,1,0,1 ++ .size function,.-function ++ .align 2 ++ .globl exported_function ++ .type exported_function, @function ++exported_function: ++0: addis 2,12,.TOC.-0b@ha ++ addi 2,2,.TOC.-0b@l ++ .localentry exported_function,.-exported_function ++ mflr 0 ++ std 0,16(1) ++ std 31,-8(1) ++ stdu 1,-48(1) ++ mr 31,1 ++ bl function ++ mr 3,9 ++ addi 1,31,48 ++ ld 0,16(1) ++ mtlr 0 ++ ld 31,-8(1) ++ blr ++ .long 0 ++ .byte 0,0,0,1,128,1,0,1 ++ .size 
exported_function,.-exported_function ++ .ident "GCC: (Ubuntu 4.9.2-10ubuntu13) 4.9.2" ++ .section .note.GNU-stack,"",@progbits +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-Sample/out.s +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-Sample/out.s +@@ -0,0 +1,552 @@ ++.text ++.file 1 "inserted_by_delocate.c" ++.loc 1 1 0 ++BORINGSSL_bcm_text_start: ++ .file "foo.c" ++ .abiversion 2 ++ .section ".toc","aw" ++# WAS .section ".text" ++.text ++# WAS .section .rodata ++.text ++ .align 3 ++ .type kString, @object ++ .size kString, 12 ++.LkString_local_target: ++kString: ++ .string "hello world" ++ .globl kExportedString ++ .align 3 ++ .type kExportedString, @object ++ .size kExportedString, 26 ++.LkExportedString_local_target: ++kExportedString: ++ .string "hello world, more visibly" ++ .align 2 ++ .type kGiantArray, @object ++ .size kGiantArray, 400000 ++.LkGiantArray_local_target: ++kGiantArray: ++ .long 1 ++ .long 0 ++ .zero 399992 ++ .lcomm bss,20,4 ++ .type bss, @object ++ .align 3 ++.LC1: ++ ++ .string "kString is %p\n" ++ .align 3 ++.LC2: ++ ++ .string "kExportedString is %p\n" ++ .align 3 ++.LC4: ++ ++ .string "function is %p\n" ++ .align 3 ++.LC5: ++ ++ .string "exported_function is %p\n" ++ .align 3 ++.LC7: ++ ++ .string "&kString[5] is %p\n" ++ .align 3 ++.LC9: ++ ++ .string "&kGiantArray[0x12345] is %p\n" ++ .section ".toc","aw" ++.LC0: ++ ++ .quad stderr ++.LC3: ++ ++ .quad kExportedString ++.LC6: ++ ++ .quad exported_function ++.LC8: ++ ++ .quad kString+5 ++.LC10: ++ ++ .quad kGiantArray+298260 ++# WAS .section ".text" ++.text ++ .align 2 ++ .type function, @function ++.Lfunction_local_target: ++function: ++0: ++999: ++ addis 2, 12, .LBORINGSSL_external_toc-999b@ha ++ addi 2, 2, .LBORINGSSL_external_toc-999b@l ++ ld 12, 0(2) ++ add 2, 2, 12 ++# WAS addi 2,2,.TOC.-0b@l ++ 
.localentry function,.-function ++.Lfunction_local_entry: ++ mflr 0 ++ std 0,16(1) ++ std 31,-8(1) ++ stdu 1,-112(1) ++ mr 31,1 ++# WAS addis 10,2,.LC0@toc@ha ++# WAS ld 9,.LC0@toc@l(10) ++ addi 1, 1, -288 ++ mflr 9 ++ std 9, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC0 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 9, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 9, 0(9) ++ ld 9,0(9) ++ mr 3,9 ++# WAS addis 4,2,.LC1@toc@ha ++# WAS addi 4,4,.LC1@toc@l ++ addi 1, 1, -288 ++ mflr 4 ++ std 4, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC1 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 4, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS addis 5,2,kString@toc@ha ++# WAS addi 5,5,kString@toc@l ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LkString_local_target ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS bl fprintf ++ bl bcm_redirector_fprintf ++ ld 2, 24(1) ++ nop ++# WAS addis 10,2,.LC0@toc@ha ++# WAS ld 9,.LC0@toc@l(10) ++ addi 1, 1, -288 ++ mflr 9 ++ std 9, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC0 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 9, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 9, 0(9) ++ ld 9,0(9) ++ mr 3,9 ++# WAS addis 4,2,.LC2@toc@ha ++# WAS addi 4,4,.LC2@toc@l ++ addi 1, 1, -288 ++ mflr 4 ++ std 4, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC2 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 4, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS addis 9,2,.LC3@toc@ha ++# WAS ld 5,.LC3@toc@l(9) ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC3 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 5, 0(5) ++# WAS bl fprintf ++ bl bcm_redirector_fprintf ++ ld 2, 24(1) ++ nop ++# WAS addis 10,2,.LC0@toc@ha ++# WAS ld 9,.LC0@toc@l(10) ++ addi 1, 1, -288 ++ mflr 9 ++ std 9, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC0 ++ std 3, -24(1) ++ ld 
3, -8(1) ++ mtlr 3 ++ ld 9, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 9, 0(9) ++ ld 9,0(9) ++ mr 3,9 ++# WAS addis 4,2,.LC4@toc@ha ++# WAS addi 4,4,.LC4@toc@l ++ addi 1, 1, -288 ++ mflr 4 ++ std 4, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC4 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 4, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS addis 5,2,function@toc@ha ++# WAS addi 5,5,function@toc@l ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_Lfunction_local_target ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS bl fprintf ++ bl bcm_redirector_fprintf ++ ld 2, 24(1) ++ nop ++# WAS addis 10,2,.LC0@toc@ha ++# WAS ld 9,.LC0@toc@l(10) ++ addi 1, 1, -288 ++ mflr 9 ++ std 9, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC0 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 9, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 9, 0(9) ++ ld 9,0(9) ++ mr 3,9 ++# WAS addis 4,2,.LC5@toc@ha ++# WAS addi 4,4,.LC5@toc@l ++ addi 1, 1, -288 ++ mflr 4 ++ std 4, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC5 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 4, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS addis 9,2,.LC6@toc@ha ++# WAS ld 5,.LC6@toc@l(9) ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC6 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 5, 0(5) ++# WAS bl fprintf ++ bl bcm_redirector_fprintf ++ ld 2, 24(1) ++ nop ++# WAS addis 10,2,.LC0@toc@ha ++# WAS ld 9,.LC0@toc@l(10) ++ addi 1, 1, -288 ++ mflr 9 ++ std 9, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC0 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 9, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 9, 0(9) ++ ld 9,0(9) ++ mr 3,9 ++# WAS addis 4,2,.LC7@toc@ha ++# WAS addi 4,4,.LC7@toc@l ++ addi 1, 1, -288 ++ mflr 4 ++ std 4, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC7 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 
++ ld 4, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS addis 9,2,.LC8@toc@ha ++# WAS ld 5,.LC8@toc@l(9) ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC8 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 5, 0(5) ++# WAS bl fprintf ++ bl bcm_redirector_fprintf ++ ld 2, 24(1) ++ nop ++# WAS addis 10,2,.LC0@toc@ha ++# WAS ld 9,.LC0@toc@l(10) ++ addi 1, 1, -288 ++ mflr 9 ++ std 9, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC0 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 9, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 9, 0(9) ++ ld 9,0(9) ++ mr 3,9 ++# WAS addis 4,2,.LC9@toc@ha ++# WAS addi 4,4,.LC9@toc@l ++ addi 1, 1, -288 ++ mflr 4 ++ std 4, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC9 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 4, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS addis 9,2,.LC10@toc@ha ++# WAS ld 5,.LC10@toc@l(9) ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC10 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 5, 0(5) ++# WAS bl fprintf ++ bl bcm_redirector_fprintf ++ ld 2, 24(1) ++ nop ++# WAS bl exported_function ++ bl .Lexported_function_local_entry ++ nop ++ mr 3,9 ++ addi 1,31,112 ++ ld 0,16(1) ++ mtlr 0 ++ ld 31,-8(1) ++ blr ++ .long 0 ++ .byte 0,0,0,1,128,1,0,1 ++ .size function,.-function ++ .align 2 ++ .globl exported_function ++ .type exported_function, @function ++.Lexported_function_local_target: ++exported_function: ++0: ++999: ++ addis 2, 12, .LBORINGSSL_external_toc-999b@ha ++ addi 2, 2, .LBORINGSSL_external_toc-999b@l ++ ld 12, 0(2) ++ add 2, 2, 12 ++# WAS addi 2,2,.TOC.-0b@l ++ .localentry exported_function,.-exported_function ++.Lexported_function_local_entry: ++ mflr 0 ++ std 0,16(1) ++ std 31,-8(1) ++ stdu 1,-48(1) ++ mr 31,1 ++# WAS bl function ++ bl .Lfunction_local_entry ++ mr 3,9 ++ addi 1,31,48 ++ ld 0,16(1) ++ mtlr 0 ++ ld 
31,-8(1) ++ blr ++ .long 0 ++ .byte 0,0,0,1,128,1,0,1 ++ .size exported_function,.-exported_function ++ .ident "GCC: (Ubuntu 4.9.2-10ubuntu13) 4.9.2" ++ .section .note.GNU-stack,"",@progbits ++.text ++.loc 1 2 0 ++BORINGSSL_bcm_text_end: ++.section ".toc", "aw" ++.Lredirector_toc_fprintf: ++.quad fprintf ++.text ++.type bcm_redirector_fprintf, @function ++bcm_redirector_fprintf: ++ std 2, 24(1) ++ addis 12, 2, .Lredirector_toc_fprintf@toc@ha ++ ld 12, .Lredirector_toc_fprintf@toc@l(12) ++ mtctr 12 ++ bctr ++.type bss_bss_get, @function ++bss_bss_get: ++ addis 3, 2, bss@toc@ha ++ addi 3, 3, bss@toc@l ++ blr ++.type bcm_loadtoc__dot_LC0, @function ++bcm_loadtoc__dot_LC0: ++.Lbcm_loadtoc__dot_LC0: ++ addis 3, 2, .LC0@toc@ha ++ addi 3, 3, .LC0@toc@l ++ blr ++.type bcm_loadtoc__dot_LC1, @function ++bcm_loadtoc__dot_LC1: ++.Lbcm_loadtoc__dot_LC1: ++ addis 3, 2, .LC1@toc@ha ++ addi 3, 3, .LC1@toc@l ++ blr ++.type bcm_loadtoc__dot_LC10, @function ++bcm_loadtoc__dot_LC10: ++.Lbcm_loadtoc__dot_LC10: ++ addis 3, 2, .LC10@toc@ha ++ addi 3, 3, .LC10@toc@l ++ blr ++.type bcm_loadtoc__dot_LC2, @function ++bcm_loadtoc__dot_LC2: ++.Lbcm_loadtoc__dot_LC2: ++ addis 3, 2, .LC2@toc@ha ++ addi 3, 3, .LC2@toc@l ++ blr ++.type bcm_loadtoc__dot_LC3, @function ++bcm_loadtoc__dot_LC3: ++.Lbcm_loadtoc__dot_LC3: ++ addis 3, 2, .LC3@toc@ha ++ addi 3, 3, .LC3@toc@l ++ blr ++.type bcm_loadtoc__dot_LC4, @function ++bcm_loadtoc__dot_LC4: ++.Lbcm_loadtoc__dot_LC4: ++ addis 3, 2, .LC4@toc@ha ++ addi 3, 3, .LC4@toc@l ++ blr ++.type bcm_loadtoc__dot_LC5, @function ++bcm_loadtoc__dot_LC5: ++.Lbcm_loadtoc__dot_LC5: ++ addis 3, 2, .LC5@toc@ha ++ addi 3, 3, .LC5@toc@l ++ blr ++.type bcm_loadtoc__dot_LC6, @function ++bcm_loadtoc__dot_LC6: ++.Lbcm_loadtoc__dot_LC6: ++ addis 3, 2, .LC6@toc@ha ++ addi 3, 3, .LC6@toc@l ++ blr ++.type bcm_loadtoc__dot_LC7, @function ++bcm_loadtoc__dot_LC7: ++.Lbcm_loadtoc__dot_LC7: ++ addis 3, 2, .LC7@toc@ha ++ addi 3, 3, .LC7@toc@l ++ blr ++.type bcm_loadtoc__dot_LC8, @function 
++bcm_loadtoc__dot_LC8: ++.Lbcm_loadtoc__dot_LC8: ++ addis 3, 2, .LC8@toc@ha ++ addi 3, 3, .LC8@toc@l ++ blr ++.type bcm_loadtoc__dot_LC9, @function ++bcm_loadtoc__dot_LC9: ++.Lbcm_loadtoc__dot_LC9: ++ addis 3, 2, .LC9@toc@ha ++ addi 3, 3, .LC9@toc@l ++ blr ++.type bcm_loadtoc__dot_Lfunction_local_target, @function ++bcm_loadtoc__dot_Lfunction_local_target: ++.Lbcm_loadtoc__dot_Lfunction_local_target: ++ addis 3, 2, .Lfunction_local_target@toc@ha ++ addi 3, 3, .Lfunction_local_target@toc@l ++ blr ++.type bcm_loadtoc__dot_LkString_local_target, @function ++bcm_loadtoc__dot_LkString_local_target: ++.Lbcm_loadtoc__dot_LkString_local_target: ++ addis 3, 2, .LkString_local_target@toc@ha ++ addi 3, 3, .LkString_local_target@toc@l ++ blr ++.LBORINGSSL_external_toc: ++.quad .TOC.-.LBORINGSSL_external_toc ++.type BORINGSSL_bcm_text_hash, @object ++.size BORINGSSL_bcm_text_hash, 32 ++BORINGSSL_bcm_text_hash: ++.byte 0xae ++.byte 0x2c ++.byte 0xea ++.byte 0x2a ++.byte 0xbd ++.byte 0xa6 ++.byte 0xf3 ++.byte 0xec ++.byte 0x97 ++.byte 0x7f ++.byte 0x9b ++.byte 0xf6 ++.byte 0x94 ++.byte 0x9a ++.byte 0xfc ++.byte 0x83 ++.byte 0x68 ++.byte 0x27 ++.byte 0xcb ++.byte 0xa0 ++.byte 0xa0 ++.byte 0x9f ++.byte 0x6b ++.byte 0x6f ++.byte 0xde ++.byte 0x52 ++.byte 0xcd ++.byte 0xe2 ++.byte 0xcd ++.byte 0xff ++.byte 0x31 ++.byte 0x80 +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-Sample2/in.s +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-Sample2/in.s +@@ -0,0 +1,226 @@ ++ .file "foo.c" ++ .abiversion 2 ++ .section ".toc","aw" ++ .section ".text" ++ .section ".toc","aw" ++.LC0: ++ .quad stderr ++.LC3: ++ .quad kExportedString ++.LC6: ++ .quad exported_function ++ .section ".text" ++ .align 2 ++ .p2align 4,,15 ++ .globl exported_function ++ .type exported_function, @function ++exported_function: ++0: addis 
2,12,.TOC.-0b@ha ++ addi 2,2,.TOC.-0b@l ++ .localentry exported_function,.-exported_function ++ mflr 0 ++ std 19,-104(1) ++ std 20,-96(1) ++ std 21,-88(1) ++ std 22,-80(1) ++ addis 21,2,.LC1@toc@ha ++ addis 22,2,.LC2@toc@ha ++ std 23,-72(1) ++ std 24,-64(1) ++ addis 23,2,.LC4@toc@ha ++ addis 24,2,function@toc@ha ++ std 25,-56(1) ++ std 26,-48(1) ++ addis 25,2,.LC5@toc@ha ++ addis 26,2,.LC7@toc@ha ++ std 27,-40(1) ++ std 28,-32(1) ++ addis 28,2,.LC8@toc@ha ++ addi 21,21,.LC1@toc@l ++ std 29,-24(1) ++ std 30,-16(1) ++ addis 29,2,.LANCHOR0@toc@ha ++ addi 22,22,.LC2@toc@l ++ std 31,-8(1) ++ std 0,16(1) ++ addi 29,29,.LANCHOR0@toc@l ++ addi 23,23,.LC4@toc@l ++ stdu 1,-208(1) ++ addis 31,2,.LC0@toc@ha # gpr load fusion, type long ++ ld 31,.LC0@toc@l(31) ++ addis 19,2,.LC3@toc@ha # gpr load fusion, type long ++ ld 19,.LC3@toc@l(19) ++ addis 30,29,0x5 ++ addi 24,24,function@toc@l ++ addis 20,2,.LC6@toc@ha # gpr load fusion, type long ++ ld 20,.LC6@toc@l(20) ++ addi 25,25,.LC5@toc@l ++ addi 26,26,.LC7@toc@l ++ addi 27,29,5 ++ addi 28,28,.LC8@toc@l ++ addi 30,30,-29404 ++ .p2align 4,,15 ++.L2: ++ ld 3,0(31) ++ mr 5,21 ++ mr 6,29 ++ li 4,1 ++ bl __fprintf_chk ++ nop ++ ld 3,0(31) ++ mr 5,22 ++ mr 6,19 ++ li 4,1 ++ bl __fprintf_chk ++ nop ++ ld 3,0(31) ++ mr 5,23 ++ mr 6,24 ++ li 4,1 ++ bl __fprintf_chk ++ nop ++ ld 3,0(31) ++ mr 5,25 ++ mr 6,20 ++ li 4,1 ++ bl __fprintf_chk ++ nop ++ ld 3,0(31) ++ mr 5,26 ++ mr 6,27 ++ li 4,1 ++ bl __fprintf_chk ++ nop ++ ld 3,0(31) ++ li 4,1 ++ mr 5,28 ++ mr 6,30 ++ bl __fprintf_chk ++ nop ++ b .L2 ++ .long 0 ++ .byte 0,0,0,1,128,13,0,0 ++ .size exported_function,.-exported_function ++ .section ".toc","aw" ++ .set .LC11,.LC0 ++ .set .LC12,.LC3 ++ .set .LC13,.LC6 ++ .section ".text" ++ .align 2 ++ .p2align 4,,15 ++ .type function, @function ++function: ++0: addis 2,12,.TOC.-0b@ha ++ addi 2,2,.TOC.-0b@l ++ .localentry function,.-function ++ mflr 0 ++ std 31,-8(1) ++ addis 31,2,.LC11@toc@ha # gpr load fusion, type long ++ ld 31,.LC11@toc@l(31) 
++ addis 5,2,.LC1@toc@ha ++ std 30,-16(1) ++ addis 30,2,.LANCHOR0@toc@ha ++ addi 5,5,.LC1@toc@l ++ addi 30,30,.LANCHOR0@toc@l ++ li 4,1 ++ mr 6,30 ++ std 0,16(1) ++ stdu 1,-112(1) ++ ld 3,0(31) ++ bl __fprintf_chk ++ nop ++ addis 6,2,.LC12@toc@ha # gpr load fusion, type long ++ ld 6,.LC12@toc@l(6) ++ ld 3,0(31) ++ addis 5,2,.LC2@toc@ha ++ li 4,1 ++ addi 5,5,.LC2@toc@l ++ bl __fprintf_chk ++ nop ++ ld 3,0(31) ++ addis 5,2,.LC4@toc@ha ++ addis 6,2,function@toc@ha ++ addi 5,5,.LC4@toc@l ++ addi 6,6,function@toc@l ++ li 4,1 ++ bl __fprintf_chk ++ nop ++ addis 6,2,.LC13@toc@ha # gpr load fusion, type long ++ ld 6,.LC13@toc@l(6) ++ ld 3,0(31) ++ addis 5,2,.LC5@toc@ha ++ li 4,1 ++ addi 5,5,.LC5@toc@l ++ bl __fprintf_chk ++ nop ++ ld 3,0(31) ++ addis 5,2,.LC7@toc@ha ++ addi 6,30,5 ++ addi 5,5,.LC7@toc@l ++ li 4,1 ++ bl __fprintf_chk ++ nop ++ ld 3,0(31) ++ addis 6,30,0x5 ++ addis 5,2,.LC8@toc@ha ++ li 4,1 ++ addi 5,5,.LC8@toc@l ++ addi 6,6,-29404 ++ bl __fprintf_chk ++ nop ++ bl exported_function ++ nop ++ addi 1,1,112 ++ ld 0,16(1) ++ ld 30,-16(1) ++ ld 31,-8(1) ++ mtlr 0 ++ blr ++ .long 0 ++ .byte 0,0,0,1,128,2,0,0 ++ .size function,.-function ++ .globl kExportedString ++ .section .rodata ++ .align 4 ++ .set .LANCHOR0,. 
+ 0 ++ .type kString, @object ++ .size kString, 12 ++kString: ++ .string "hello world" ++ .zero 4 ++ .type kGiantArray, @object ++ .size kGiantArray, 400000 ++kGiantArray: ++ .long 1 ++ .long 0 ++ .zero 399992 ++ .type kExportedString, @object ++ .size kExportedString, 26 ++kExportedString: ++ .string "hello world, more visibly" ++ .section .rodata.str1.8,"aMS",@progbits,1 ++ .align 3 ++.LC1: ++ .string "kString is %p\n" ++ .zero 1 ++.LC2: ++ .string "kExportedString is %p\n" ++ .zero 1 ++.LC4: ++ .string "function is %p\n" ++.LC5: ++ .string "exported_function is %p\n" ++ .zero 7 ++.LC7: ++ .string "&kString[5] is %p\n" ++ .zero 5 ++.LC8: ++ .string "&kGiantArray[0x12345] is %p\n" ++ .section ".bss" ++ .align 2 ++ .type bss, @object ++ .size bss, 20 ++bss: ++ .zero 20 ++ .ident "GCC: (Ubuntu 4.9.2-10ubuntu13) 4.9.2" ++ .section .note.GNU-stack,"",@progbits +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-Sample2/out.s +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-Sample2/out.s +@@ -0,0 +1,677 @@ ++.text ++.file 1 "inserted_by_delocate.c" ++.loc 1 1 0 ++BORINGSSL_bcm_text_start: ++ .file "foo.c" ++ .abiversion 2 ++ .section ".toc","aw" ++# WAS .section ".text" ++.text ++ .section ".toc","aw" ++.LC0: ++ ++ .quad stderr ++.LC3: ++ ++ .quad kExportedString ++.LC6: ++ ++ .quad exported_function ++# WAS .section ".text" ++.text ++ .align 2 ++ .p2align 4,,15 ++ .globl exported_function ++ .type exported_function, @function ++.Lexported_function_local_target: ++exported_function: ++0: ++999: ++ addis 2, 12, .LBORINGSSL_external_toc-999b@ha ++ addi 2, 2, .LBORINGSSL_external_toc-999b@l ++ ld 12, 0(2) ++ add 2, 2, 12 ++# WAS addi 2,2,.TOC.-0b@l ++ .localentry exported_function,.-exported_function ++.Lexported_function_local_entry: ++ mflr 0 ++ std 19,-104(1) ++ std 20,-96(1) ++ std 21,-88(1) ++ 
std 22,-80(1) ++# WAS addis 21,2,.LC1@toc@ha ++# WAS addis 22,2,.LC2@toc@ha ++ std 23,-72(1) ++ std 24,-64(1) ++# WAS addis 23,2,.LC4@toc@ha ++# WAS addis 24,2,function@toc@ha ++ std 25,-56(1) ++ std 26,-48(1) ++# WAS addis 25,2,.LC5@toc@ha ++# WAS addis 26,2,.LC7@toc@ha ++ std 27,-40(1) ++ std 28,-32(1) ++# WAS addis 28,2,.LC8@toc@ha ++# WAS addi 21,21,.LC1@toc@l ++ addi 1, 1, -288 ++ mflr 21 ++ std 21, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC1 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 21, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ std 29,-24(1) ++ std 30,-16(1) ++# WAS addis 29,2,.LANCHOR0@toc@ha ++# WAS addi 22,22,.LC2@toc@l ++ addi 1, 1, -288 ++ mflr 22 ++ std 22, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC2 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 22, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ std 31,-8(1) ++ std 0,16(1) ++# WAS addi 29,29,.LANCHOR0@toc@l ++ addi 1, 1, -288 ++ mflr 29 ++ std 29, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LANCHOR0 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 29, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS addi 23,23,.LC4@toc@l ++ addi 1, 1, -288 ++ mflr 23 ++ std 23, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC4 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 23, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ stdu 1,-208(1) ++# WAS addis 31,2,.LC0@toc@ha # gpr load fusion, type long ++# WAS ld 31,.LC0@toc@l(31) ++ addi 1, 1, -288 ++ mflr 31 ++ std 31, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC0 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 31, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 31, 0(31) ++# WAS addis 19,2,.LC3@toc@ha # gpr load fusion, type long ++# WAS ld 19,.LC3@toc@l(19) ++ addi 1, 1, -288 ++ mflr 19 ++ std 19, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC3 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 19, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 19, 0(19) ++ addis 30,29,0x5 ++# WAS addi 24,24,function@toc@l ++ addi 1, 1, -288 ++ mflr 24 ++ 
std 24, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_Lfunction_local_target ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 24, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS addis 20,2,.LC6@toc@ha # gpr load fusion, type long ++# WAS ld 20,.LC6@toc@l(20) ++ addi 1, 1, -288 ++ mflr 20 ++ std 20, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC6 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 20, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 20, 0(20) ++# WAS addi 25,25,.LC5@toc@l ++ addi 1, 1, -288 ++ mflr 25 ++ std 25, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC5 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 25, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS addi 26,26,.LC7@toc@l ++ addi 1, 1, -288 ++ mflr 26 ++ std 26, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC7 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 26, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ addi 27,29,5 ++# WAS addi 28,28,.LC8@toc@l ++ addi 1, 1, -288 ++ mflr 28 ++ std 28, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC8 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 28, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ addi 30,30,-29404 ++ .p2align 4,,15 ++.L2: ++ ++ ld 3,0(31) ++ mr 5,21 ++ mr 6,29 ++ li 4,1 ++# WAS bl __fprintf_chk ++ bl bcm_redirector___fprintf_chk ++ ld 2, 24(1) ++ nop ++ ld 3,0(31) ++ mr 5,22 ++ mr 6,19 ++ li 4,1 ++# WAS bl __fprintf_chk ++ bl bcm_redirector___fprintf_chk ++ ld 2, 24(1) ++ nop ++ ld 3,0(31) ++ mr 5,23 ++ mr 6,24 ++ li 4,1 ++# WAS bl __fprintf_chk ++ bl bcm_redirector___fprintf_chk ++ ld 2, 24(1) ++ nop ++ ld 3,0(31) ++ mr 5,25 ++ mr 6,20 ++ li 4,1 ++# WAS bl __fprintf_chk ++ bl bcm_redirector___fprintf_chk ++ ld 2, 24(1) ++ nop ++ ld 3,0(31) ++ mr 5,26 ++ mr 6,27 ++ li 4,1 ++# WAS bl __fprintf_chk ++ bl bcm_redirector___fprintf_chk ++ ld 2, 24(1) ++ nop ++ ld 3,0(31) ++ li 4,1 ++ mr 5,28 ++ mr 6,30 ++# WAS bl __fprintf_chk ++ bl bcm_redirector___fprintf_chk ++ ld 2, 24(1) ++ nop ++ b .L2 ++ .long 0 ++ .byte 0,0,0,1,128,13,0,0 ++ 
.size exported_function,.-exported_function ++ .section ".toc","aw" ++ .set .LC11,.LC0 ++ .set .LC12,.LC3 ++ .set .LC13,.LC6 ++# WAS .section ".text" ++.text ++ .align 2 ++ .p2align 4,,15 ++ .type function, @function ++.Lfunction_local_target: ++function: ++0: ++999: ++ addis 2, 12, .LBORINGSSL_external_toc-999b@ha ++ addi 2, 2, .LBORINGSSL_external_toc-999b@l ++ ld 12, 0(2) ++ add 2, 2, 12 ++# WAS addi 2,2,.TOC.-0b@l ++ .localentry function,.-function ++.Lfunction_local_entry: ++ mflr 0 ++ std 31,-8(1) ++# WAS addis 31,2,.LC11@toc@ha # gpr load fusion, type long ++# WAS ld 31,.LC11@toc@l(31) ++ addi 1, 1, -288 ++ mflr 31 ++ std 31, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC11 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 31, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 31, 0(31) ++# WAS addis 5,2,.LC1@toc@ha ++ std 30,-16(1) ++# WAS addis 30,2,.LANCHOR0@toc@ha ++# WAS addi 5,5,.LC1@toc@l ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC1 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS addi 30,30,.LANCHOR0@toc@l ++ addi 1, 1, -288 ++ mflr 30 ++ std 30, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LANCHOR0 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 30, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ li 4,1 ++ mr 6,30 ++ std 0,16(1) ++ stdu 1,-112(1) ++ ld 3,0(31) ++# WAS bl __fprintf_chk ++ bl bcm_redirector___fprintf_chk ++ ld 2, 24(1) ++ nop ++# WAS addis 6,2,.LC12@toc@ha # gpr load fusion, type long ++# WAS ld 6,.LC12@toc@l(6) ++ addi 1, 1, -288 ++ mflr 6 ++ std 6, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC12 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 6, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 6, 0(6) ++ ld 3,0(31) ++# WAS addis 5,2,.LC2@toc@ha ++ li 4,1 ++# WAS addi 5,5,.LC2@toc@l ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC2 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, 
-16(1) ++ addi 1, 1, 288 ++# WAS bl __fprintf_chk ++ bl bcm_redirector___fprintf_chk ++ ld 2, 24(1) ++ nop ++ ld 3,0(31) ++# WAS addis 5,2,.LC4@toc@ha ++# WAS addis 6,2,function@toc@ha ++# WAS addi 5,5,.LC4@toc@l ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC4 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS addi 6,6,function@toc@l ++ addi 1, 1, -288 ++ mflr 6 ++ std 6, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_Lfunction_local_target ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 6, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ li 4,1 ++# WAS bl __fprintf_chk ++ bl bcm_redirector___fprintf_chk ++ ld 2, 24(1) ++ nop ++# WAS addis 6,2,.LC13@toc@ha # gpr load fusion, type long ++# WAS ld 6,.LC13@toc@l(6) ++ addi 1, 1, -288 ++ mflr 6 ++ std 6, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC13 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 6, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 6, 0(6) ++ ld 3,0(31) ++# WAS addis 5,2,.LC5@toc@ha ++ li 4,1 ++# WAS addi 5,5,.LC5@toc@l ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC5 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++# WAS bl __fprintf_chk ++ bl bcm_redirector___fprintf_chk ++ ld 2, 24(1) ++ nop ++ ld 3,0(31) ++# WAS addis 5,2,.LC7@toc@ha ++ addi 6,30,5 ++# WAS addi 5,5,.LC7@toc@l ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC7 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ li 4,1 ++# WAS bl __fprintf_chk ++ bl bcm_redirector___fprintf_chk ++ ld 2, 24(1) ++ nop ++ ld 3,0(31) ++ addis 6,30,0x5 ++# WAS addis 5,2,.LC8@toc@ha ++ li 4,1 ++# WAS addi 5,5,.LC8@toc@l ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_LC8 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 
++ addi 6,6,-29404 ++# WAS bl __fprintf_chk ++ bl bcm_redirector___fprintf_chk ++ ld 2, 24(1) ++ nop ++# WAS bl exported_function ++ bl .Lexported_function_local_entry ++ nop ++ addi 1,1,112 ++ ld 0,16(1) ++ ld 30,-16(1) ++ ld 31,-8(1) ++ mtlr 0 ++ blr ++ .long 0 ++ .byte 0,0,0,1,128,2,0,0 ++ .size function,.-function ++ .globl kExportedString ++# WAS .section .rodata ++.text ++ .align 4 ++ .set .LANCHOR0,. + 0 ++ .type kString, @object ++ .size kString, 12 ++.LkString_local_target: ++kString: ++ .string "hello world" ++ .zero 4 ++ .type kGiantArray, @object ++ .size kGiantArray, 400000 ++.LkGiantArray_local_target: ++kGiantArray: ++ .long 1 ++ .long 0 ++ .zero 399992 ++ .type kExportedString, @object ++ .size kExportedString, 26 ++.LkExportedString_local_target: ++kExportedString: ++ .string "hello world, more visibly" ++# WAS .section .rodata.str1.8,"aMS",@progbits,1 ++.text ++ .align 3 ++.LC1: ++ ++ .string "kString is %p\n" ++ .zero 1 ++.LC2: ++ ++ .string "kExportedString is %p\n" ++ .zero 1 ++.LC4: ++ ++ .string "function is %p\n" ++.LC5: ++ ++ .string "exported_function is %p\n" ++ .zero 7 ++.LC7: ++ ++ .string "&kString[5] is %p\n" ++ .zero 5 ++.LC8: ++ ++ .string "&kGiantArray[0x12345] is %p\n" ++ .section ".bss" ++ .align 2 ++ .type bss, @object ++ .size bss, 20 ++bss: ++.Lbss_local_target: ++ ++ .zero 20 ++ .ident "GCC: (Ubuntu 4.9.2-10ubuntu13) 4.9.2" ++ .section .note.GNU-stack,"",@progbits ++.text ++.loc 1 2 0 ++BORINGSSL_bcm_text_end: ++.section ".toc", "aw" ++.Lredirector_toc___fprintf_chk: ++.quad __fprintf_chk ++.text ++.type bcm_redirector___fprintf_chk, @function ++bcm_redirector___fprintf_chk: ++ std 2, 24(1) ++ addis 12, 2, .Lredirector_toc___fprintf_chk@toc@ha ++ ld 12, .Lredirector_toc___fprintf_chk@toc@l(12) ++ mtctr 12 ++ bctr ++.type bss_bss_get, @function ++bss_bss_get: ++ addis 3, 2, .Lbss_local_target@toc@ha ++ addi 3, 3, .Lbss_local_target@toc@l ++ blr ++.type bcm_loadtoc__dot_LANCHOR0, @function ++bcm_loadtoc__dot_LANCHOR0: 
++.Lbcm_loadtoc__dot_LANCHOR0: ++ addis 3, 2, .LANCHOR0@toc@ha ++ addi 3, 3, .LANCHOR0@toc@l ++ blr ++.type bcm_loadtoc__dot_LC0, @function ++bcm_loadtoc__dot_LC0: ++.Lbcm_loadtoc__dot_LC0: ++ addis 3, 2, .LC0@toc@ha ++ addi 3, 3, .LC0@toc@l ++ blr ++.type bcm_loadtoc__dot_LC1, @function ++bcm_loadtoc__dot_LC1: ++.Lbcm_loadtoc__dot_LC1: ++ addis 3, 2, .LC1@toc@ha ++ addi 3, 3, .LC1@toc@l ++ blr ++.type bcm_loadtoc__dot_LC11, @function ++bcm_loadtoc__dot_LC11: ++.Lbcm_loadtoc__dot_LC11: ++ addis 3, 2, .LC11@toc@ha ++ addi 3, 3, .LC11@toc@l ++ blr ++.type bcm_loadtoc__dot_LC12, @function ++bcm_loadtoc__dot_LC12: ++.Lbcm_loadtoc__dot_LC12: ++ addis 3, 2, .LC12@toc@ha ++ addi 3, 3, .LC12@toc@l ++ blr ++.type bcm_loadtoc__dot_LC13, @function ++bcm_loadtoc__dot_LC13: ++.Lbcm_loadtoc__dot_LC13: ++ addis 3, 2, .LC13@toc@ha ++ addi 3, 3, .LC13@toc@l ++ blr ++.type bcm_loadtoc__dot_LC2, @function ++bcm_loadtoc__dot_LC2: ++.Lbcm_loadtoc__dot_LC2: ++ addis 3, 2, .LC2@toc@ha ++ addi 3, 3, .LC2@toc@l ++ blr ++.type bcm_loadtoc__dot_LC3, @function ++bcm_loadtoc__dot_LC3: ++.Lbcm_loadtoc__dot_LC3: ++ addis 3, 2, .LC3@toc@ha ++ addi 3, 3, .LC3@toc@l ++ blr ++.type bcm_loadtoc__dot_LC4, @function ++bcm_loadtoc__dot_LC4: ++.Lbcm_loadtoc__dot_LC4: ++ addis 3, 2, .LC4@toc@ha ++ addi 3, 3, .LC4@toc@l ++ blr ++.type bcm_loadtoc__dot_LC5, @function ++bcm_loadtoc__dot_LC5: ++.Lbcm_loadtoc__dot_LC5: ++ addis 3, 2, .LC5@toc@ha ++ addi 3, 3, .LC5@toc@l ++ blr ++.type bcm_loadtoc__dot_LC6, @function ++bcm_loadtoc__dot_LC6: ++.Lbcm_loadtoc__dot_LC6: ++ addis 3, 2, .LC6@toc@ha ++ addi 3, 3, .LC6@toc@l ++ blr ++.type bcm_loadtoc__dot_LC7, @function ++bcm_loadtoc__dot_LC7: ++.Lbcm_loadtoc__dot_LC7: ++ addis 3, 2, .LC7@toc@ha ++ addi 3, 3, .LC7@toc@l ++ blr ++.type bcm_loadtoc__dot_LC8, @function ++bcm_loadtoc__dot_LC8: ++.Lbcm_loadtoc__dot_LC8: ++ addis 3, 2, .LC8@toc@ha ++ addi 3, 3, .LC8@toc@l ++ blr ++.type bcm_loadtoc__dot_Lfunction_local_target, @function 
++bcm_loadtoc__dot_Lfunction_local_target: ++.Lbcm_loadtoc__dot_Lfunction_local_target: ++ addis 3, 2, .Lfunction_local_target@toc@ha ++ addi 3, 3, .Lfunction_local_target@toc@l ++ blr ++.LBORINGSSL_external_toc: ++.quad .TOC.-.LBORINGSSL_external_toc ++.type BORINGSSL_bcm_text_hash, @object ++.size BORINGSSL_bcm_text_hash, 32 ++BORINGSSL_bcm_text_hash: ++.byte 0xae ++.byte 0x2c ++.byte 0xea ++.byte 0x2a ++.byte 0xbd ++.byte 0xa6 ++.byte 0xf3 ++.byte 0xec ++.byte 0x97 ++.byte 0x7f ++.byte 0x9b ++.byte 0xf6 ++.byte 0x94 ++.byte 0x9a ++.byte 0xfc ++.byte 0x83 ++.byte 0x68 ++.byte 0x27 ++.byte 0xcb ++.byte 0xa0 ++.byte 0xa0 ++.byte 0x9f ++.byte 0x6b ++.byte 0x6f ++.byte 0xde ++.byte 0x52 ++.byte 0xcd ++.byte 0xe2 ++.byte 0xcd ++.byte 0xff ++.byte 0x31 ++.byte 0x80 +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-TOCWithOffset/in.s +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-TOCWithOffset/in.s +@@ -0,0 +1,23 @@ ++ .text ++foo: ++ # TOC references may have offsets. 
++ addis 3, 2, 5+foo@toc@ha ++ addi 3, 3, 10+foo@toc@l ++ ++ addis 3, 2, 15+foo@toc@ha ++ addi 3, 3, 20+foo@toc@l ++ ++ addis 4, 2, foo@toc@ha ++ addi 4, 4, foo@toc@l ++ ++ addis 5, 2, 5+foo@toc@ha ++ ld 5, 10+foo@toc@l(5) ++ ++ addis 4, 2, foo-10@toc@ha ++ addi 4, 4, foo-10@toc@l ++ ++ addis 4, 2, foo@toc@ha+25 ++ addi 4, 4, foo@toc@l+25 ++ ++ addis 4, 2, 1+foo-2@toc@ha+3 ++ addi 4, 4, 1+foo-2@toc@l+3 +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-TOCWithOffset/out.s +=================================================================== +--- /dev/null ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/fipstools/delocate/testdata/ppc64le-TOCWithOffset/out.s +@@ -0,0 +1,178 @@ ++.text ++.file 1 "inserted_by_delocate.c" ++.loc 1 1 0 ++BORINGSSL_bcm_text_start: ++ .text ++.Lfoo_local_target: ++foo: ++ # TOC references may have offsets. ++# WAS addis 3, 2, 5+foo@toc@ha ++# WAS addi 3, 3, 10+foo@toc@l ++ addi 1, 1, -288 ++ mflr 3 ++ std 3, -8(1) ++ bl .Lbcm_loadtoc__dot_Lfoo_local_target__plus_10 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 3, -24(1) ++ addi 1, 1, 288 ++ ++# WAS addis 3, 2, 15+foo@toc@ha ++# WAS addi 3, 3, 20+foo@toc@l ++ addi 1, 1, -288 ++ mflr 3 ++ std 3, -8(1) ++ bl .Lbcm_loadtoc__dot_Lfoo_local_target__plus_20 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 3, -24(1) ++ addi 1, 1, 288 ++ ++# WAS addis 4, 2, foo@toc@ha ++# WAS addi 4, 4, foo@toc@l ++ addi 1, 1, -288 ++ mflr 4 ++ std 4, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_Lfoo_local_target ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 4, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ++# WAS addis 5, 2, 5+foo@toc@ha ++# WAS ld 5, 10+foo@toc@l(5) ++ addi 1, 1, -288 ++ mflr 5 ++ std 5, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_Lfoo_local_target__plus_10 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 5, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ld 5, 0(5) ++ ++# WAS addis 4, 2, foo-10@toc@ha ++# WAS addi 4, 4, foo-10@toc@l ++ 
addi 1, 1, -288 ++ mflr 4 ++ std 4, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_Lfoo_local_target__minus_10 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 4, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ++# WAS addis 4, 2, foo@toc@ha+25 ++# WAS addi 4, 4, foo@toc@l+25 ++ addi 1, 1, -288 ++ mflr 4 ++ std 4, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_Lfoo_local_target__plus_25 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 4, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++ ++# WAS addis 4, 2, 1+foo-2@toc@ha+3 ++# WAS addi 4, 4, 1+foo-2@toc@l+3 ++ addi 1, 1, -288 ++ mflr 4 ++ std 4, -8(1) ++ std 3, -16(1) ++ bl .Lbcm_loadtoc__dot_Lfoo_local_target__plus_1_minus_2_plus_3 ++ std 3, -24(1) ++ ld 3, -8(1) ++ mtlr 3 ++ ld 4, -24(1) ++ ld 3, -16(1) ++ addi 1, 1, 288 ++.text ++.loc 1 2 0 ++BORINGSSL_bcm_text_end: ++.type bcm_loadtoc__dot_Lfoo_local_target, @function ++bcm_loadtoc__dot_Lfoo_local_target: ++.Lbcm_loadtoc__dot_Lfoo_local_target: ++ addis 3, 2, .Lfoo_local_target@toc@ha ++ addi 3, 3, .Lfoo_local_target@toc@l ++ blr ++.type bcm_loadtoc__dot_Lfoo_local_target__plus_1_minus_2_plus_3, @function ++bcm_loadtoc__dot_Lfoo_local_target__plus_1_minus_2_plus_3: ++.Lbcm_loadtoc__dot_Lfoo_local_target__plus_1_minus_2_plus_3: ++ addis 3, 2, .Lfoo_local_target+1-2+3@toc@ha ++ addi 3, 3, .Lfoo_local_target+1-2+3@toc@l ++ blr ++.type bcm_loadtoc__dot_Lfoo_local_target__plus_10, @function ++bcm_loadtoc__dot_Lfoo_local_target__plus_10: ++.Lbcm_loadtoc__dot_Lfoo_local_target__plus_10: ++ addis 3, 2, .Lfoo_local_target+10@toc@ha ++ addi 3, 3, .Lfoo_local_target+10@toc@l ++ blr ++.type bcm_loadtoc__dot_Lfoo_local_target__plus_20, @function ++bcm_loadtoc__dot_Lfoo_local_target__plus_20: ++.Lbcm_loadtoc__dot_Lfoo_local_target__plus_20: ++ addis 3, 2, .Lfoo_local_target+20@toc@ha ++ addi 3, 3, .Lfoo_local_target+20@toc@l ++ blr ++.type bcm_loadtoc__dot_Lfoo_local_target__plus_25, @function ++bcm_loadtoc__dot_Lfoo_local_target__plus_25: 
++.Lbcm_loadtoc__dot_Lfoo_local_target__plus_25: ++ addis 3, 2, .Lfoo_local_target+25@toc@ha ++ addi 3, 3, .Lfoo_local_target+25@toc@l ++ blr ++.type bcm_loadtoc__dot_Lfoo_local_target__minus_10, @function ++bcm_loadtoc__dot_Lfoo_local_target__minus_10: ++.Lbcm_loadtoc__dot_Lfoo_local_target__minus_10: ++ addis 3, 2, .Lfoo_local_target-10@toc@ha ++ addi 3, 3, .Lfoo_local_target-10@toc@l ++ blr ++.LBORINGSSL_external_toc: ++.quad .TOC.-.LBORINGSSL_external_toc ++.type BORINGSSL_bcm_text_hash, @object ++.size BORINGSSL_bcm_text_hash, 32 ++BORINGSSL_bcm_text_hash: ++.byte 0xae ++.byte 0x2c ++.byte 0xea ++.byte 0x2a ++.byte 0xbd ++.byte 0xa6 ++.byte 0xf3 ++.byte 0xec ++.byte 0x97 ++.byte 0x7f ++.byte 0x9b ++.byte 0xf6 ++.byte 0x94 ++.byte 0x9a ++.byte 0xfc ++.byte 0x83 ++.byte 0x68 ++.byte 0x27 ++.byte 0xcb ++.byte 0xa0 ++.byte 0xa0 ++.byte 0x9f ++.byte 0x6b ++.byte 0x6f ++.byte 0xde ++.byte 0x52 ++.byte 0xcd ++.byte 0xe2 ++.byte 0xcd ++.byte 0xff ++.byte 0x31 ++.byte 0x80 +Index: chromium-122.0.6261.57/third_party/boringssl/src/util/generate_build_files.py +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/boringssl/src/util/generate_build_files.py ++++ chromium-122.0.6261.57/third_party/boringssl/src/util/generate_build_files.py +@@ -35,6 +35,7 @@ OS_ARCH_COMBOS = [ + ('apple', 'x86_64', 'macosx', [], 'S'), + ('linux', 'arm', 'linux32', [], 'S'), + ('linux', 'aarch64', 'linux64', [], 'S'), ++ ('linux', 'ppc64le', 'linux64le', [], 'S'), + ('linux', 'x86', 'elf', ['-fPIC', '-DOPENSSL_IA32_SSE2'], 'S'), + ('linux', 'x86_64', 'elf', [], 'S'), + ('win', 'x86', 'win32n', ['-DOPENSSL_IA32_SSE2'], 'asm'), diff --git a/0001-Add-PPC64-support-for-libdav1d.patch b/0001-Add-PPC64-support-for-libdav1d.patch new file mode 100644 index 0000000..145ae8d --- /dev/null +++ b/0001-Add-PPC64-support-for-libdav1d.patch @@ -0,0 +1,23 @@ +From 61dcf0ff9603e8f5b0a859fb0837c51527ebae43 Mon Sep 17 00:00:00 2001 +From: Colin 
Samples +Date: Fri, 1 Nov 2019 11:50:52 -0400 +Subject: [PATCH] Add PPC64 support for libdav1d + +--- + third_party/dav1d/BUILD.gn | 21 ++++++++++++++++++++ + third_party/dav1d/generate_configs.py | 28 ++------------------------- + third_party/dav1d/generate_source.py | 2 ++ + 3 files changed, 25 insertions(+), 26 deletions(-) + +Index: chromium-120.0.6099.71/third_party/dav1d/generate_configs.py +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/dav1d/generate_configs.py ++++ chromium-120.0.6099.71/third_party/dav1d/generate_configs.py +@@ -203,6 +203,7 @@ def main(): + linux_env = os.environ + linux_env['CC'] = 'clang' + ++ GenerateConfig('config/linux/ppc64', linux_env) + GenerateConfig('config/linux/x64', linux_env) + + noasm_dir = 'config/linux-noasm/x64' diff --git a/0001-Add-ppc64-target-to-libaom.patch b/0001-Add-ppc64-target-to-libaom.patch new file mode 100644 index 0000000..86630dd --- /dev/null +++ b/0001-Add-ppc64-target-to-libaom.patch @@ -0,0 +1,57 @@ +From 40309fb53e39477490fd6928ebe67c4fb78de380 Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Sun, 10 Mar 2019 21:01:37 -0500 +Subject: [PATCH] Add ppc64 target to libaom + +--- + third_party/libaom/BUILD.gn | 12 ++++++++++++ + third_party/libaom/cmake_update.sh | 3 +++ + 2 files changed, 15 insertions(+) + +Index: chromium-120.0.6099.71/third_party/libaom/BUILD.gn +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/libaom/BUILD.gn ++++ chromium-120.0.6099.71/third_party/libaom/BUILD.gn +@@ -248,6 +248,18 @@ if (current_cpu == "arm64") { + } + } + ++if (current_cpu == "ppc64") { ++ source_set("libaom_intrinsics_vsx") { ++ configs -= [ "//build/config/compiler:chromium_code" ] ++ configs += [ "//build/config/compiler:no_chromium_code" ] ++ configs += [ ":libaom_config" ] ++ sources = [ ++ "//third_party/libaom/source/libaom/aom_ports/ppc_cpudetect.c", ++ ] ++ sources += 
aom_av1_common_intrin_vsx ++ } ++} ++ + static_library("libaom") { + check_includes = false + if (!is_debug && is_win) { +@@ -312,6 +324,9 @@ static_library("libaom") { + # This is needed by all arm boards due to aom_arm_cpu_caps() + sources += [ "source/libaom/aom_ports/aarch32_cpudetect.c" ] + } ++ if (current_cpu == "ppc64") { ++ deps += [ ":libaom_intrinsics_vsx" ] ++ } + if (is_android) { + deps += [ "//third_party/cpu_features:ndk_compat" ] + } +Index: chromium-120.0.6099.71/third_party/libaom/cmake_update.sh +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/libaom/cmake_update.sh ++++ chromium-120.0.6099.71/third_party/libaom/cmake_update.sh +@@ -187,6 +187,9 @@ gen_config_files linux/arm64-cpu-detect + "${toolchain}/arm64-linux-gcc.cmake -DCONFIG_RUNTIME_CPU_DETECT=1 \ + ${all_platforms}" + ++reset_dirs linux/ppc64 ++gen_config_files linux/ppc64 "${toolchain}/ppc-linux-gcc.cmake ${all_platforms}" ++ + # Copy linux configurations and modify for Windows. + reset_dirs win/arm64-cpu-detect + cp "${CFG}/linux/arm64-cpu-detect/config"/* \ diff --git a/0001-Add-pregenerated-config-for-libaom-on-ppc64.patch b/0001-Add-pregenerated-config-for-libaom-on-ppc64.patch new file mode 100644 index 0000000..afbbb0d --- /dev/null +++ b/0001-Add-pregenerated-config-for-libaom-on-ppc64.patch @@ -0,0 +1,2355 @@ +Index: chromium-121.0.6167.75/third_party/libaom/source/config/linux/ppc64/config/aom_config.asm +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libaom/source/config/linux/ppc64/config/aom_config.asm +@@ -0,0 +1,92 @@ ++; ++; Copyright (c) 2024, Alliance for Open Media. All rights reserved ++; ++; This source code is subject to the terms of the BSD 2 Clause License and ++; the Alliance for Open Media Patent License 1.0. 
If the BSD 2 Clause License ++; was not distributed with this source code in the LICENSE file, you can ++; obtain it at www.aomedia.org/license/software. If the Alliance for Open ++; Media Patent License 1.0 was not distributed with this source code in the ++; PATENTS file, you can obtain it at www.aomedia.org/license/patent. ++; ++AOM_ARCH_AARCH64 equ 0 ++AOM_ARCH_ARM equ 0 ++AOM_ARCH_PPC equ 1 ++AOM_ARCH_X86 equ 0 ++AOM_ARCH_X86_64 equ 0 ++CONFIG_ACCOUNTING equ 0 ++CONFIG_ANALYZER equ 0 ++CONFIG_AV1_DECODER equ 1 ++CONFIG_AV1_ENCODER equ 1 ++CONFIG_AV1_HIGHBITDEPTH equ 0 ++CONFIG_AV1_TEMPORAL_DENOISING equ 1 ++CONFIG_BIG_ENDIAN equ 0 ++CONFIG_BITRATE_ACCURACY equ 0 ++CONFIG_BITRATE_ACCURACY_BL equ 0 ++CONFIG_BITSTREAM_DEBUG equ 0 ++CONFIG_COEFFICIENT_RANGE_CHECKING equ 0 ++CONFIG_COLLECT_COMPONENT_TIMING equ 0 ++CONFIG_COLLECT_PARTITION_STATS equ 0 ++CONFIG_COLLECT_RD_STATS equ 0 ++CONFIG_CWG_C013 equ 0 ++CONFIG_DEBUG equ 0 ++CONFIG_DENOISE equ 1 ++CONFIG_DISABLE_FULL_PIXEL_SPLIT_8X8 equ 1 ++CONFIG_ENTROPY_STATS equ 0 ++CONFIG_EXCLUDE_SIMD_MISMATCH equ 0 ++CONFIG_FPMT_TEST equ 0 ++CONFIG_GCC equ 1 ++CONFIG_GCOV equ 0 ++CONFIG_GPROF equ 0 ++CONFIG_INSPECTION equ 0 ++CONFIG_INTERNAL_STATS equ 0 ++CONFIG_INTER_STATS_ONLY equ 0 ++CONFIG_LIBYUV equ 0 ++CONFIG_MAX_DECODE_PROFILE equ 0 ++CONFIG_MISMATCH_DEBUG equ 0 ++CONFIG_MULTITHREAD equ 1 ++CONFIG_NN_V2 equ 0 ++CONFIG_NORMAL_TILE_MODE equ 1 ++CONFIG_OPTICAL_FLOW_API equ 0 ++CONFIG_OS_SUPPORT equ 1 ++CONFIG_OUTPUT_FRAME_SIZE equ 0 ++CONFIG_PARTITION_SEARCH_ORDER equ 0 ++CONFIG_PIC equ 1 ++CONFIG_RATECTRL_LOG equ 0 ++CONFIG_RD_COMMAND equ 0 ++CONFIG_RD_DEBUG equ 0 ++CONFIG_REALTIME_ONLY equ 1 ++CONFIG_RT_ML_PARTITIONING equ 0 ++CONFIG_RUNTIME_CPU_DETECT equ 0 ++CONFIG_SALIENCY_MAP equ 0 ++CONFIG_SHARED equ 0 ++CONFIG_SIZE_LIMIT equ 1 ++CONFIG_SPATIAL_RESAMPLING equ 1 ++CONFIG_SPEED_STATS equ 0 ++CONFIG_TFLITE equ 0 ++CONFIG_THREE_PASS equ 0 ++CONFIG_TUNE_BUTTERAUGLI equ 0 ++CONFIG_TUNE_VMAF equ 0 ++CONFIG_WEBM_IO equ 1 
++DECODE_HEIGHT_LIMIT equ 16384 ++DECODE_WIDTH_LIMIT equ 16384 ++FORCE_HIGHBITDEPTH_DECODING equ 0 ++HAVE_ARM_CRC32 equ 0 ++HAVE_AVX equ 0 ++HAVE_AVX2 equ 0 ++HAVE_FEXCEPT equ 1 ++HAVE_MMX equ 0 ++HAVE_NEON equ 0 ++HAVE_NEON_DOTPROD equ 0 ++HAVE_NEON_I8MM equ 0 ++HAVE_PTHREAD_H equ 1 ++HAVE_SSE equ 0 ++HAVE_SSE2 equ 0 ++HAVE_SSE3 equ 0 ++HAVE_SSE4_1 equ 0 ++HAVE_SSE4_2 equ 0 ++HAVE_SSSE3 equ 0 ++HAVE_SVE equ 0 ++HAVE_UNISTD_H equ 1 ++HAVE_VSX equ 1 ++HAVE_WXWIDGETS equ 0 ++STATIC_LINK_JXL equ 0 +Index: chromium-121.0.6167.75/third_party/libaom/source/config/linux/ppc64/config/aom_config.c +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libaom/source/config/linux/ppc64/config/aom_config.c +@@ -0,0 +1,13 @@ ++/* ++ * Copyright (c) 2024, Alliance for Open Media. All rights reserved ++ * ++ * This source code is subject to the terms of the BSD 2 Clause License and ++ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License ++ * was not distributed with this source code in the LICENSE file, you can ++ * obtain it at www.aomedia.org/license/software. If the Alliance for Open ++ * Media Patent License 1.0 was not distributed with this source code in the ++ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
++ */ ++#include "aom/aom_codec.h" ++static const char* const cfg = "cmake ../source/libaom -G \"Unix Makefiles\" -DCMAKE_TOOLCHAIN_FILE=\"../source/libaom/build/cmake/toolchains/ppc-linux-gcc.cmake\" -DCONFIG_AV1_ENCODER=1 -DCONFIG_LIBYUV=0 -DCONFIG_AV1_HIGHBITDEPTH=0 -DCONFIG_AV1_TEMPORAL_DENOISING=1 -DCONFIG_REALTIME_ONLY=1 -DCONFIG_MAX_DECODE_PROFILE=0 -DCONFIG_NORMAL_TILE_MODE=1 -DCONFIG_SIZE_LIMIT=1 -DDECODE_HEIGHT_LIMIT=16384 -DDECODE_WIDTH_LIMIT=16384"; ++const char *aom_codec_build_config(void) {return cfg;} +Index: chromium-121.0.6167.75/third_party/libaom/source/config/linux/ppc64/config/aom_config.h +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libaom/source/config/linux/ppc64/config/aom_config.h +@@ -0,0 +1,96 @@ ++/* ++ * Copyright (c) 2024, Alliance for Open Media. All rights reserved ++ * ++ * This source code is subject to the terms of the BSD 2 Clause License and ++ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License ++ * was not distributed with this source code in the LICENSE file, you can ++ * obtain it at www.aomedia.org/license/software. If the Alliance for Open ++ * Media Patent License 1.0 was not distributed with this source code in the ++ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
++ */ ++#ifndef AOM_CONFIG_H_ ++#define AOM_CONFIG_H_ ++#define AOM_ARCH_AARCH64 0 ++#define AOM_ARCH_ARM 0 ++#define AOM_ARCH_PPC 1 ++#define AOM_ARCH_X86 0 ++#define AOM_ARCH_X86_64 0 ++#define CONFIG_ACCOUNTING 0 ++#define CONFIG_ANALYZER 0 ++#define CONFIG_AV1_DECODER 1 ++#define CONFIG_AV1_ENCODER 1 ++#define CONFIG_AV1_HIGHBITDEPTH 0 ++#define CONFIG_AV1_TEMPORAL_DENOISING 1 ++#define CONFIG_BIG_ENDIAN 0 ++#define CONFIG_BITRATE_ACCURACY 0 ++#define CONFIG_BITRATE_ACCURACY_BL 0 ++#define CONFIG_BITSTREAM_DEBUG 0 ++#define CONFIG_COEFFICIENT_RANGE_CHECKING 0 ++#define CONFIG_COLLECT_COMPONENT_TIMING 0 ++#define CONFIG_COLLECT_PARTITION_STATS 0 ++#define CONFIG_COLLECT_RD_STATS 0 ++#define CONFIG_CWG_C013 0 ++#define CONFIG_DEBUG 0 ++#define CONFIG_DENOISE 1 ++#define CONFIG_DISABLE_FULL_PIXEL_SPLIT_8X8 1 ++#define CONFIG_ENTROPY_STATS 0 ++#define CONFIG_EXCLUDE_SIMD_MISMATCH 0 ++#define CONFIG_FPMT_TEST 0 ++#define CONFIG_GCC 1 ++#define CONFIG_GCOV 0 ++#define CONFIG_GPROF 0 ++#define CONFIG_INSPECTION 0 ++#define CONFIG_INTERNAL_STATS 0 ++#define CONFIG_INTER_STATS_ONLY 0 ++#define CONFIG_LIBYUV 0 ++#define CONFIG_MAX_DECODE_PROFILE 0 ++#define CONFIG_MISMATCH_DEBUG 0 ++#define CONFIG_MULTITHREAD 1 ++#define CONFIG_NN_V2 0 ++#define CONFIG_NORMAL_TILE_MODE 1 ++#define CONFIG_OPTICAL_FLOW_API 0 ++#define CONFIG_OS_SUPPORT 1 ++#define CONFIG_OUTPUT_FRAME_SIZE 0 ++#define CONFIG_PARTITION_SEARCH_ORDER 0 ++#define CONFIG_PIC 1 ++#define CONFIG_RATECTRL_LOG 0 ++#define CONFIG_RD_COMMAND 0 ++#define CONFIG_RD_DEBUG 0 ++#define CONFIG_REALTIME_ONLY 1 ++#define CONFIG_RT_ML_PARTITIONING 0 ++#define CONFIG_RUNTIME_CPU_DETECT 0 ++#define CONFIG_SALIENCY_MAP 0 ++#define CONFIG_SHARED 0 ++#define CONFIG_SIZE_LIMIT 1 ++#define CONFIG_SPATIAL_RESAMPLING 1 ++#define CONFIG_SPEED_STATS 0 ++#define CONFIG_TFLITE 0 ++#define CONFIG_THREE_PASS 0 ++#define CONFIG_TUNE_BUTTERAUGLI 0 ++#define CONFIG_TUNE_VMAF 0 ++#define CONFIG_WEBM_IO 1 ++#define DECODE_HEIGHT_LIMIT 16384 
++#define DECODE_WIDTH_LIMIT 16384 ++#define FORCE_HIGHBITDEPTH_DECODING 0 ++#define HAVE_ARM_CRC32 0 ++#define HAVE_AVX 0 ++#define HAVE_AVX2 0 ++#define HAVE_FEXCEPT 1 ++#define HAVE_MMX 0 ++#define HAVE_NEON 0 ++#define HAVE_NEON_DOTPROD 0 ++#define HAVE_NEON_I8MM 0 ++#define HAVE_PTHREAD_H 1 ++#define HAVE_SSE 0 ++#define HAVE_SSE2 0 ++#define HAVE_SSE3 0 ++#define HAVE_SSE4_1 0 ++#define HAVE_SSE4_2 0 ++#define HAVE_SSSE3 0 ++#define HAVE_SVE 0 ++#define HAVE_UNISTD_H 1 ++#define HAVE_VSX 1 ++#define HAVE_WXWIDGETS 0 ++#define INLINE inline ++#define STATIC_LINK_JXL 0 ++#endif // AOM_CONFIG_H_ +Index: chromium-121.0.6167.75/third_party/libaom/source/config/linux/ppc64/config/aom_dsp_rtcd.h +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libaom/source/config/linux/ppc64/config/aom_dsp_rtcd.h +@@ -0,0 +1,1541 @@ ++// This file is generated. Do not edit. ++#ifndef AOM_DSP_RTCD_H_ ++#define AOM_DSP_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++/* ++ * DSP ++ */ ++ ++#include "aom/aom_integer.h" ++#include "aom_dsp/aom_dsp_common.h" ++#include "av1/common/blockd.h" ++#include "av1/common/enums.h" ++ ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++unsigned int aom_avg_4x4_c(const uint8_t *, int p); ++#define aom_avg_4x4 aom_avg_4x4_c ++ ++unsigned int aom_avg_8x8_c(const uint8_t *, int p); ++#define aom_avg_8x8 aom_avg_8x8_c ++ ++void aom_avg_8x8_quad_c(const uint8_t *s, int p, int x16_idx, int y16_idx, int *avg); ++#define aom_avg_8x8_quad aom_avg_8x8_quad_c ++ ++void aom_blend_a64_hmask_c(uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int w, int h); ++#define aom_blend_a64_hmask aom_blend_a64_hmask_c ++ ++void aom_blend_a64_mask_c(uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t 
src1_stride, const uint8_t *mask, uint32_t mask_stride, int w, int h, int subw, int subh); ++#define aom_blend_a64_mask aom_blend_a64_mask_c ++ ++void aom_blend_a64_vmask_c(uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int w, int h); ++#define aom_blend_a64_vmask aom_blend_a64_vmask_c ++ ++void aom_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride); ++#define aom_comp_avg_pred aom_comp_avg_pred_c ++ ++void aom_comp_mask_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask); ++#define aom_comp_mask_pred aom_comp_mask_pred_c ++ ++void aom_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h); ++#define aom_convolve8 aom_convolve8_c ++ ++void aom_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); ++#define aom_convolve8_horiz aom_convolve8_horiz_c ++ ++void aom_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); ++#define aom_convolve8_vert aom_convolve8_vert_c ++ ++void aom_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, int w, int h); ++#define aom_convolve_copy aom_convolve_copy_c ++ ++void aom_dc_128_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_16x16 aom_dc_128_predictor_16x16_c ++ ++void aom_dc_128_predictor_16x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, 
const uint8_t *left); ++#define aom_dc_128_predictor_16x32 aom_dc_128_predictor_16x32_c ++ ++void aom_dc_128_predictor_16x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_16x4 aom_dc_128_predictor_16x4_c ++ ++void aom_dc_128_predictor_16x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_16x64 aom_dc_128_predictor_16x64_c ++ ++void aom_dc_128_predictor_16x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_16x8 aom_dc_128_predictor_16x8_c ++ ++void aom_dc_128_predictor_32x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_32x16 aom_dc_128_predictor_32x16_c ++ ++void aom_dc_128_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_32x32 aom_dc_128_predictor_32x32_c ++ ++void aom_dc_128_predictor_32x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_32x64 aom_dc_128_predictor_32x64_c ++ ++void aom_dc_128_predictor_32x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_32x8 aom_dc_128_predictor_32x8_c ++ ++void aom_dc_128_predictor_4x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_4x16 aom_dc_128_predictor_4x16_c ++ ++void aom_dc_128_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_4x4 aom_dc_128_predictor_4x4_c ++ ++void aom_dc_128_predictor_4x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_4x8 aom_dc_128_predictor_4x8_c ++ ++void aom_dc_128_predictor_64x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const 
uint8_t *left); ++#define aom_dc_128_predictor_64x16 aom_dc_128_predictor_64x16_c ++ ++void aom_dc_128_predictor_64x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_64x32 aom_dc_128_predictor_64x32_c ++ ++void aom_dc_128_predictor_64x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_64x64 aom_dc_128_predictor_64x64_c ++ ++void aom_dc_128_predictor_8x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_8x16 aom_dc_128_predictor_8x16_c ++ ++void aom_dc_128_predictor_8x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_8x32 aom_dc_128_predictor_8x32_c ++ ++void aom_dc_128_predictor_8x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_8x4 aom_dc_128_predictor_8x4_c ++ ++void aom_dc_128_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_128_predictor_8x8 aom_dc_128_predictor_8x8_c ++ ++void aom_dc_left_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_16x16 aom_dc_left_predictor_16x16_c ++ ++void aom_dc_left_predictor_16x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_16x32 aom_dc_left_predictor_16x32_c ++ ++void aom_dc_left_predictor_16x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_16x4 aom_dc_left_predictor_16x4_c ++ ++void aom_dc_left_predictor_16x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_16x64 aom_dc_left_predictor_16x64_c ++ ++void aom_dc_left_predictor_16x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, 
const uint8_t *left); ++#define aom_dc_left_predictor_16x8 aom_dc_left_predictor_16x8_c ++ ++void aom_dc_left_predictor_32x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_32x16 aom_dc_left_predictor_32x16_c ++ ++void aom_dc_left_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_32x32 aom_dc_left_predictor_32x32_c ++ ++void aom_dc_left_predictor_32x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_32x64 aom_dc_left_predictor_32x64_c ++ ++void aom_dc_left_predictor_32x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_32x8 aom_dc_left_predictor_32x8_c ++ ++void aom_dc_left_predictor_4x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_4x16 aom_dc_left_predictor_4x16_c ++ ++void aom_dc_left_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_4x4 aom_dc_left_predictor_4x4_c ++ ++void aom_dc_left_predictor_4x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_4x8 aom_dc_left_predictor_4x8_c ++ ++void aom_dc_left_predictor_64x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_64x16 aom_dc_left_predictor_64x16_c ++ ++void aom_dc_left_predictor_64x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_64x32 aom_dc_left_predictor_64x32_c ++ ++void aom_dc_left_predictor_64x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_64x64 aom_dc_left_predictor_64x64_c ++ ++void aom_dc_left_predictor_8x16_c(uint8_t *dst, ptrdiff_t 
y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_8x16 aom_dc_left_predictor_8x16_c ++ ++void aom_dc_left_predictor_8x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_8x32 aom_dc_left_predictor_8x32_c ++ ++void aom_dc_left_predictor_8x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_8x4 aom_dc_left_predictor_8x4_c ++ ++void aom_dc_left_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_left_predictor_8x8 aom_dc_left_predictor_8x8_c ++ ++void aom_dc_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_16x16 aom_dc_predictor_16x16_c ++ ++void aom_dc_predictor_16x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_16x32 aom_dc_predictor_16x32_c ++ ++void aom_dc_predictor_16x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_16x4 aom_dc_predictor_16x4_c ++ ++void aom_dc_predictor_16x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_16x64 aom_dc_predictor_16x64_c ++ ++void aom_dc_predictor_16x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_16x8 aom_dc_predictor_16x8_c ++ ++void aom_dc_predictor_32x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_32x16 aom_dc_predictor_32x16_c ++ ++void aom_dc_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_32x32 aom_dc_predictor_32x32_c ++ ++void aom_dc_predictor_32x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_32x64 
aom_dc_predictor_32x64_c ++ ++void aom_dc_predictor_32x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_32x8 aom_dc_predictor_32x8_c ++ ++void aom_dc_predictor_4x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_4x16 aom_dc_predictor_4x16_c ++ ++void aom_dc_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_4x4 aom_dc_predictor_4x4_c ++ ++void aom_dc_predictor_4x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_4x8 aom_dc_predictor_4x8_c ++ ++void aom_dc_predictor_64x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_64x16 aom_dc_predictor_64x16_c ++ ++void aom_dc_predictor_64x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_64x32 aom_dc_predictor_64x32_c ++ ++void aom_dc_predictor_64x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_64x64 aom_dc_predictor_64x64_c ++ ++void aom_dc_predictor_8x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_8x16 aom_dc_predictor_8x16_c ++ ++void aom_dc_predictor_8x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_8x32 aom_dc_predictor_8x32_c ++ ++void aom_dc_predictor_8x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_8x4 aom_dc_predictor_8x4_c ++ ++void aom_dc_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_predictor_8x8 aom_dc_predictor_8x8_c ++ ++void aom_dc_top_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define 
aom_dc_top_predictor_16x16 aom_dc_top_predictor_16x16_c ++ ++void aom_dc_top_predictor_16x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_16x32 aom_dc_top_predictor_16x32_c ++ ++void aom_dc_top_predictor_16x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_16x4 aom_dc_top_predictor_16x4_c ++ ++void aom_dc_top_predictor_16x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_16x64 aom_dc_top_predictor_16x64_c ++ ++void aom_dc_top_predictor_16x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_16x8 aom_dc_top_predictor_16x8_c ++ ++void aom_dc_top_predictor_32x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_32x16 aom_dc_top_predictor_32x16_c ++ ++void aom_dc_top_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_32x32 aom_dc_top_predictor_32x32_c ++ ++void aom_dc_top_predictor_32x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_32x64 aom_dc_top_predictor_32x64_c ++ ++void aom_dc_top_predictor_32x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_32x8 aom_dc_top_predictor_32x8_c ++ ++void aom_dc_top_predictor_4x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_4x16 aom_dc_top_predictor_4x16_c ++ ++void aom_dc_top_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_4x4 aom_dc_top_predictor_4x4_c ++ ++void aom_dc_top_predictor_4x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define 
aom_dc_top_predictor_4x8 aom_dc_top_predictor_4x8_c ++ ++void aom_dc_top_predictor_64x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_64x16 aom_dc_top_predictor_64x16_c ++ ++void aom_dc_top_predictor_64x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_64x32 aom_dc_top_predictor_64x32_c ++ ++void aom_dc_top_predictor_64x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_64x64 aom_dc_top_predictor_64x64_c ++ ++void aom_dc_top_predictor_8x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_8x16 aom_dc_top_predictor_8x16_c ++ ++void aom_dc_top_predictor_8x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_8x32 aom_dc_top_predictor_8x32_c ++ ++void aom_dc_top_predictor_8x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_8x4 aom_dc_top_predictor_8x4_c ++ ++void aom_dc_top_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_dc_top_predictor_8x8 aom_dc_top_predictor_8x8_c ++ ++void aom_dist_wtd_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_comp_avg_pred aom_dist_wtd_comp_avg_pred_c ++ ++unsigned int aom_dist_wtd_sad128x128_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad128x128_avg aom_dist_wtd_sad128x128_avg_c ++ ++unsigned int aom_dist_wtd_sad128x64_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const 
DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad128x64_avg aom_dist_wtd_sad128x64_avg_c ++ ++unsigned int aom_dist_wtd_sad16x16_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad16x16_avg aom_dist_wtd_sad16x16_avg_c ++ ++unsigned int aom_dist_wtd_sad16x32_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad16x32_avg aom_dist_wtd_sad16x32_avg_c ++ ++unsigned int aom_dist_wtd_sad16x8_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad16x8_avg aom_dist_wtd_sad16x8_avg_c ++ ++unsigned int aom_dist_wtd_sad32x16_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad32x16_avg aom_dist_wtd_sad32x16_avg_c ++ ++unsigned int aom_dist_wtd_sad32x32_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad32x32_avg aom_dist_wtd_sad32x32_avg_c ++ ++unsigned int aom_dist_wtd_sad32x64_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad32x64_avg aom_dist_wtd_sad32x64_avg_c ++ ++unsigned int aom_dist_wtd_sad4x4_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad4x4_avg aom_dist_wtd_sad4x4_avg_c ++ ++unsigned int aom_dist_wtd_sad4x8_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t 
*ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad4x8_avg aom_dist_wtd_sad4x8_avg_c ++ ++unsigned int aom_dist_wtd_sad64x128_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad64x128_avg aom_dist_wtd_sad64x128_avg_c ++ ++unsigned int aom_dist_wtd_sad64x32_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad64x32_avg aom_dist_wtd_sad64x32_avg_c ++ ++unsigned int aom_dist_wtd_sad64x64_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad64x64_avg aom_dist_wtd_sad64x64_avg_c ++ ++unsigned int aom_dist_wtd_sad8x16_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad8x16_avg aom_dist_wtd_sad8x16_avg_c ++ ++unsigned int aom_dist_wtd_sad8x4_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad8x4_avg aom_dist_wtd_sad8x4_avg_c ++ ++unsigned int aom_dist_wtd_sad8x8_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sad8x8_avg aom_dist_wtd_sad8x8_avg_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance128x128_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance128x128 
aom_dist_wtd_sub_pixel_avg_variance128x128_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance128x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance128x64 aom_dist_wtd_sub_pixel_avg_variance128x64_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance16x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance16x16 aom_dist_wtd_sub_pixel_avg_variance16x16_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance16x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance16x32 aom_dist_wtd_sub_pixel_avg_variance16x32_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance16x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance16x8 aom_dist_wtd_sub_pixel_avg_variance16x8_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance32x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance32x16 aom_dist_wtd_sub_pixel_avg_variance32x16_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance32x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS 
*jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance32x32 aom_dist_wtd_sub_pixel_avg_variance32x32_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance32x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance32x64 aom_dist_wtd_sub_pixel_avg_variance32x64_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance4x4_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance4x4 aom_dist_wtd_sub_pixel_avg_variance4x4_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance4x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance4x8 aom_dist_wtd_sub_pixel_avg_variance4x8_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance64x128_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance64x128 aom_dist_wtd_sub_pixel_avg_variance64x128_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance64x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance64x32 aom_dist_wtd_sub_pixel_avg_variance64x32_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance64x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const 
uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance64x64 aom_dist_wtd_sub_pixel_avg_variance64x64_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance8x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance8x16 aom_dist_wtd_sub_pixel_avg_variance8x16_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance8x4_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance8x4 aom_dist_wtd_sub_pixel_avg_variance8x4_c ++ ++uint32_t aom_dist_wtd_sub_pixel_avg_variance8x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param); ++#define aom_dist_wtd_sub_pixel_avg_variance8x8 aom_dist_wtd_sub_pixel_avg_variance8x8_c ++ ++void aom_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride); ++#define aom_fdct4x4 aom_fdct4x4_c ++ ++void aom_fdct4x4_lp_c(const int16_t *input, int16_t *output, int stride); ++#define aom_fdct4x4_lp aom_fdct4x4_lp_c ++ ++void aom_fft16x16_float_c(const float *input, float *temp, float *output); ++#define aom_fft16x16_float aom_fft16x16_float_c ++ ++void aom_fft2x2_float_c(const float *input, float *temp, float *output); ++#define aom_fft2x2_float aom_fft2x2_float_c ++ ++void aom_fft32x32_float_c(const float *input, float *temp, float *output); ++#define aom_fft32x32_float aom_fft32x32_float_c ++ ++void aom_fft4x4_float_c(const float *input, float *temp, float *output); ++#define aom_fft4x4_float aom_fft4x4_float_c ++ ++void aom_fft8x8_float_c(const float *input, float *temp, float *output); 
++#define aom_fft8x8_float aom_fft8x8_float_c ++ ++void aom_get_blk_sse_sum_c(const int16_t *data, int stride, int bw, int bh, int *x_sum, int64_t *x2_sum); ++#define aom_get_blk_sse_sum aom_get_blk_sse_sum_c ++ ++unsigned int aom_get_mb_ss_c(const int16_t *); ++#define aom_get_mb_ss aom_get_mb_ss_c ++ ++void aom_get_var_sse_sum_16x16_dual_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse16x16, unsigned int *tot_sse, int *tot_sum, uint32_t *var16x16); ++#define aom_get_var_sse_sum_16x16_dual aom_get_var_sse_sum_16x16_dual_c ++ ++void aom_get_var_sse_sum_8x8_quad_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse8x8, int *sum8x8, unsigned int *tot_sse, int *tot_sum, uint32_t *var8x8); ++#define aom_get_var_sse_sum_8x8_quad aom_get_var_sse_sum_8x8_quad_c ++ ++void aom_h_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_16x16 aom_h_predictor_16x16_c ++ ++void aom_h_predictor_16x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_16x32 aom_h_predictor_16x32_c ++ ++void aom_h_predictor_16x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_16x4 aom_h_predictor_16x4_c ++ ++void aom_h_predictor_16x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_16x64 aom_h_predictor_16x64_c ++ ++void aom_h_predictor_16x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_16x8 aom_h_predictor_16x8_c ++ ++void aom_h_predictor_32x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_32x16 aom_h_predictor_32x16_c ++ ++void aom_h_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define 
aom_h_predictor_32x32 aom_h_predictor_32x32_c ++ ++void aom_h_predictor_32x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_32x64 aom_h_predictor_32x64_c ++ ++void aom_h_predictor_32x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_32x8 aom_h_predictor_32x8_c ++ ++void aom_h_predictor_4x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_4x16 aom_h_predictor_4x16_c ++ ++void aom_h_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_4x4 aom_h_predictor_4x4_c ++ ++void aom_h_predictor_4x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_4x8 aom_h_predictor_4x8_c ++ ++void aom_h_predictor_64x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_64x16 aom_h_predictor_64x16_c ++ ++void aom_h_predictor_64x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_64x32 aom_h_predictor_64x32_c ++ ++void aom_h_predictor_64x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_64x64 aom_h_predictor_64x64_c ++ ++void aom_h_predictor_8x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_8x16 aom_h_predictor_8x16_c ++ ++void aom_h_predictor_8x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_8x32 aom_h_predictor_8x32_c ++ ++void aom_h_predictor_8x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_h_predictor_8x4 aom_h_predictor_8x4_c ++ ++void aom_h_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define 
aom_h_predictor_8x8 aom_h_predictor_8x8_c ++ ++void aom_hadamard_16x16_c(const int16_t *src_diff, ptrdiff_t src_stride, tran_low_t *coeff); ++#define aom_hadamard_16x16 aom_hadamard_16x16_c ++ ++void aom_hadamard_32x32_c(const int16_t *src_diff, ptrdiff_t src_stride, tran_low_t *coeff); ++#define aom_hadamard_32x32 aom_hadamard_32x32_c ++ ++void aom_hadamard_4x4_c(const int16_t *src_diff, ptrdiff_t src_stride, tran_low_t *coeff); ++#define aom_hadamard_4x4 aom_hadamard_4x4_c ++ ++void aom_hadamard_8x8_c(const int16_t *src_diff, ptrdiff_t src_stride, tran_low_t *coeff); ++#define aom_hadamard_8x8 aom_hadamard_8x8_c ++ ++void aom_hadamard_lp_16x16_c(const int16_t *src_diff, ptrdiff_t src_stride, int16_t *coeff); ++#define aom_hadamard_lp_16x16 aom_hadamard_lp_16x16_c ++ ++void aom_hadamard_lp_8x8_c(const int16_t *src_diff, ptrdiff_t src_stride, int16_t *coeff); ++#define aom_hadamard_lp_8x8 aom_hadamard_lp_8x8_c ++ ++void aom_hadamard_lp_8x8_dual_c(const int16_t *src_diff, ptrdiff_t src_stride, int16_t *coeff); ++#define aom_hadamard_lp_8x8_dual aom_hadamard_lp_8x8_dual_c ++ ++void aom_ifft16x16_float_c(const float *input, float *temp, float *output); ++#define aom_ifft16x16_float aom_ifft16x16_float_c ++ ++void aom_ifft2x2_float_c(const float *input, float *temp, float *output); ++#define aom_ifft2x2_float aom_ifft2x2_float_c ++ ++void aom_ifft32x32_float_c(const float *input, float *temp, float *output); ++#define aom_ifft32x32_float aom_ifft32x32_float_c ++ ++void aom_ifft4x4_float_c(const float *input, float *temp, float *output); ++#define aom_ifft4x4_float aom_ifft4x4_float_c ++ ++void aom_ifft8x8_float_c(const float *input, float *temp, float *output); ++#define aom_ifft8x8_float aom_ifft8x8_float_c ++ ++void aom_int_pro_col_c(int16_t *vbuf, const uint8_t *ref, const int ref_stride, const int width, const int height, int norm_factor); ++#define aom_int_pro_col aom_int_pro_col_c ++ ++void aom_int_pro_row_c(int16_t *hbuf, const uint8_t *ref, const int 
ref_stride, const int width, const int height, int norm_factor); ++#define aom_int_pro_row aom_int_pro_row_c ++ ++void aom_lowbd_blend_a64_d16_mask_c(uint8_t *dst, uint32_t dst_stride, const CONV_BUF_TYPE *src0, uint32_t src0_stride, const CONV_BUF_TYPE *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int w, int h, int subw, int subh, ConvolveParams *conv_params); ++#define aom_lowbd_blend_a64_d16_mask aom_lowbd_blend_a64_d16_mask_c ++ ++void aom_lpf_horizontal_14_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); ++#define aom_lpf_horizontal_14 aom_lpf_horizontal_14_c ++ ++void aom_lpf_horizontal_14_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); ++#define aom_lpf_horizontal_14_dual aom_lpf_horizontal_14_dual_c ++ ++void aom_lpf_horizontal_14_quad_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0); ++#define aom_lpf_horizontal_14_quad aom_lpf_horizontal_14_quad_c ++ ++void aom_lpf_horizontal_4_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); ++#define aom_lpf_horizontal_4 aom_lpf_horizontal_4_c ++ ++void aom_lpf_horizontal_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); ++#define aom_lpf_horizontal_4_dual aom_lpf_horizontal_4_dual_c ++ ++void aom_lpf_horizontal_4_quad_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0); ++#define aom_lpf_horizontal_4_quad aom_lpf_horizontal_4_quad_c ++ ++void aom_lpf_horizontal_6_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); ++#define aom_lpf_horizontal_6 aom_lpf_horizontal_6_c ++ ++void aom_lpf_horizontal_6_dual_c(uint8_t *s, int pitch, 
const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); ++#define aom_lpf_horizontal_6_dual aom_lpf_horizontal_6_dual_c ++ ++void aom_lpf_horizontal_6_quad_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0); ++#define aom_lpf_horizontal_6_quad aom_lpf_horizontal_6_quad_c ++ ++void aom_lpf_horizontal_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); ++#define aom_lpf_horizontal_8 aom_lpf_horizontal_8_c ++ ++void aom_lpf_horizontal_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); ++#define aom_lpf_horizontal_8_dual aom_lpf_horizontal_8_dual_c ++ ++void aom_lpf_horizontal_8_quad_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0); ++#define aom_lpf_horizontal_8_quad aom_lpf_horizontal_8_quad_c ++ ++void aom_lpf_vertical_14_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); ++#define aom_lpf_vertical_14 aom_lpf_vertical_14_c ++ ++void aom_lpf_vertical_14_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); ++#define aom_lpf_vertical_14_dual aom_lpf_vertical_14_dual_c ++ ++void aom_lpf_vertical_14_quad_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0); ++#define aom_lpf_vertical_14_quad aom_lpf_vertical_14_quad_c ++ ++void aom_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); ++#define aom_lpf_vertical_4 aom_lpf_vertical_4_c ++ ++void aom_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t 
*blimit1, const uint8_t *limit1, const uint8_t *thresh1); ++#define aom_lpf_vertical_4_dual aom_lpf_vertical_4_dual_c ++ ++void aom_lpf_vertical_4_quad_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0); ++#define aom_lpf_vertical_4_quad aom_lpf_vertical_4_quad_c ++ ++void aom_lpf_vertical_6_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); ++#define aom_lpf_vertical_6 aom_lpf_vertical_6_c ++ ++void aom_lpf_vertical_6_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); ++#define aom_lpf_vertical_6_dual aom_lpf_vertical_6_dual_c ++ ++void aom_lpf_vertical_6_quad_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0); ++#define aom_lpf_vertical_6_quad aom_lpf_vertical_6_quad_c ++ ++void aom_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); ++#define aom_lpf_vertical_8 aom_lpf_vertical_8_c ++ ++void aom_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); ++#define aom_lpf_vertical_8_dual aom_lpf_vertical_8_dual_c ++ ++void aom_lpf_vertical_8_quad_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0); ++#define aom_lpf_vertical_8_quad aom_lpf_vertical_8_quad_c ++ ++unsigned int aom_masked_sad128x128_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad128x128 aom_masked_sad128x128_c ++ ++void aom_masked_sad128x128x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int 
invert_mask, unsigned sads[4]); ++#define aom_masked_sad128x128x4d aom_masked_sad128x128x4d_c ++ ++unsigned int aom_masked_sad128x64_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad128x64 aom_masked_sad128x64_c ++ ++void aom_masked_sad128x64x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad128x64x4d aom_masked_sad128x64x4d_c ++ ++unsigned int aom_masked_sad16x16_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad16x16 aom_masked_sad16x16_c ++ ++void aom_masked_sad16x16x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad16x16x4d aom_masked_sad16x16x4d_c ++ ++unsigned int aom_masked_sad16x32_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad16x32 aom_masked_sad16x32_c ++ ++void aom_masked_sad16x32x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad16x32x4d aom_masked_sad16x32x4d_c ++ ++unsigned int aom_masked_sad16x8_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad16x8 aom_masked_sad16x8_c ++ ++void aom_masked_sad16x8x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const 
uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad16x8x4d aom_masked_sad16x8x4d_c ++ ++unsigned int aom_masked_sad32x16_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad32x16 aom_masked_sad32x16_c ++ ++void aom_masked_sad32x16x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad32x16x4d aom_masked_sad32x16x4d_c ++ ++unsigned int aom_masked_sad32x32_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad32x32 aom_masked_sad32x32_c ++ ++void aom_masked_sad32x32x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad32x32x4d aom_masked_sad32x32x4d_c ++ ++unsigned int aom_masked_sad32x64_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad32x64 aom_masked_sad32x64_c ++ ++void aom_masked_sad32x64x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad32x64x4d aom_masked_sad32x64x4d_c ++ ++unsigned int aom_masked_sad4x4_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad4x4 aom_masked_sad4x4_c ++ ++void aom_masked_sad4x4x4d_c(const uint8_t *src, int 
src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad4x4x4d aom_masked_sad4x4x4d_c ++ ++unsigned int aom_masked_sad4x8_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad4x8 aom_masked_sad4x8_c ++ ++void aom_masked_sad4x8x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad4x8x4d aom_masked_sad4x8x4d_c ++ ++unsigned int aom_masked_sad64x128_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad64x128 aom_masked_sad64x128_c ++ ++void aom_masked_sad64x128x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad64x128x4d aom_masked_sad64x128x4d_c ++ ++unsigned int aom_masked_sad64x32_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad64x32 aom_masked_sad64x32_c ++ ++void aom_masked_sad64x32x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad64x32x4d aom_masked_sad64x32x4d_c ++ ++unsigned int aom_masked_sad64x64_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad64x64 aom_masked_sad64x64_c ++ ++void 
aom_masked_sad64x64x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad64x64x4d aom_masked_sad64x64x4d_c ++ ++unsigned int aom_masked_sad8x16_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad8x16 aom_masked_sad8x16_c ++ ++void aom_masked_sad8x16x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad8x16x4d aom_masked_sad8x16x4d_c ++ ++unsigned int aom_masked_sad8x4_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad8x4 aom_masked_sad8x4_c ++ ++void aom_masked_sad8x4x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad8x4x4d aom_masked_sad8x4x4d_c ++ ++unsigned int aom_masked_sad8x8_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask); ++#define aom_masked_sad8x8 aom_masked_sad8x8_c ++ ++void aom_masked_sad8x8x4d_c(const uint8_t *src, int src_stride, const uint8_t *ref[4], int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned sads[4]); ++#define aom_masked_sad8x8x4d aom_masked_sad8x8x4d_c ++ ++unsigned int aom_masked_sub_pixel_variance128x128_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int 
invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance128x128 aom_masked_sub_pixel_variance128x128_c ++ ++unsigned int aom_masked_sub_pixel_variance128x64_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance128x64 aom_masked_sub_pixel_variance128x64_c ++ ++unsigned int aom_masked_sub_pixel_variance16x16_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance16x16 aom_masked_sub_pixel_variance16x16_c ++ ++unsigned int aom_masked_sub_pixel_variance16x32_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance16x32 aom_masked_sub_pixel_variance16x32_c ++ ++unsigned int aom_masked_sub_pixel_variance16x8_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance16x8 aom_masked_sub_pixel_variance16x8_c ++ ++unsigned int aom_masked_sub_pixel_variance32x16_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance32x16 aom_masked_sub_pixel_variance32x16_c ++ ++unsigned int aom_masked_sub_pixel_variance32x32_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, 
int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance32x32 aom_masked_sub_pixel_variance32x32_c ++ ++unsigned int aom_masked_sub_pixel_variance32x64_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance32x64 aom_masked_sub_pixel_variance32x64_c ++ ++unsigned int aom_masked_sub_pixel_variance4x4_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance4x4 aom_masked_sub_pixel_variance4x4_c ++ ++unsigned int aom_masked_sub_pixel_variance4x8_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance4x8 aom_masked_sub_pixel_variance4x8_c ++ ++unsigned int aom_masked_sub_pixel_variance64x128_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance64x128 aom_masked_sub_pixel_variance64x128_c ++ ++unsigned int aom_masked_sub_pixel_variance64x32_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance64x32 aom_masked_sub_pixel_variance64x32_c ++ ++unsigned int aom_masked_sub_pixel_variance64x64_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const 
uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance64x64 aom_masked_sub_pixel_variance64x64_c ++ ++unsigned int aom_masked_sub_pixel_variance8x16_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance8x16 aom_masked_sub_pixel_variance8x16_c ++ ++unsigned int aom_masked_sub_pixel_variance8x4_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance8x4 aom_masked_sub_pixel_variance8x4_c ++ ++unsigned int aom_masked_sub_pixel_variance8x8_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse); ++#define aom_masked_sub_pixel_variance8x8 aom_masked_sub_pixel_variance8x8_c ++ ++void aom_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max); ++#define aom_minmax_8x8 aom_minmax_8x8_c ++ ++unsigned int aom_mse16x16_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse); ++#define aom_mse16x16 aom_mse16x16_c ++ ++unsigned int aom_mse16x8_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse); ++#define aom_mse16x8 aom_mse16x8_c ++ ++unsigned int aom_mse8x16_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse); ++#define aom_mse8x16 aom_mse8x16_c ++ ++unsigned int aom_mse8x8_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse); ++#define aom_mse8x8 aom_mse8x8_c ++ ++uint64_t 
aom_mse_16xh_16bit_c(uint8_t *dst, int dstride,uint16_t *src, int w, int h); ++#define aom_mse_16xh_16bit aom_mse_16xh_16bit_c ++ ++uint64_t aom_mse_wxh_16bit_c(uint8_t *dst, int dstride,uint16_t *src, int sstride, int w, int h); ++#define aom_mse_wxh_16bit aom_mse_wxh_16bit_c ++ ++void aom_paeth_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_16x16 aom_paeth_predictor_16x16_c ++ ++void aom_paeth_predictor_16x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_16x32 aom_paeth_predictor_16x32_c ++ ++void aom_paeth_predictor_16x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_16x4 aom_paeth_predictor_16x4_c ++ ++void aom_paeth_predictor_16x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_16x64 aom_paeth_predictor_16x64_c ++ ++void aom_paeth_predictor_16x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_16x8 aom_paeth_predictor_16x8_c ++ ++void aom_paeth_predictor_32x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_32x16 aom_paeth_predictor_32x16_c ++ ++void aom_paeth_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_32x32 aom_paeth_predictor_32x32_c ++ ++void aom_paeth_predictor_32x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_32x64 aom_paeth_predictor_32x64_c ++ ++void aom_paeth_predictor_32x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_32x8 aom_paeth_predictor_32x8_c ++ ++void aom_paeth_predictor_4x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t 
*left); ++#define aom_paeth_predictor_4x16 aom_paeth_predictor_4x16_c ++ ++void aom_paeth_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_4x4 aom_paeth_predictor_4x4_c ++ ++void aom_paeth_predictor_4x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_4x8 aom_paeth_predictor_4x8_c ++ ++void aom_paeth_predictor_64x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_64x16 aom_paeth_predictor_64x16_c ++ ++void aom_paeth_predictor_64x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_64x32 aom_paeth_predictor_64x32_c ++ ++void aom_paeth_predictor_64x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_64x64 aom_paeth_predictor_64x64_c ++ ++void aom_paeth_predictor_8x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_8x16 aom_paeth_predictor_8x16_c ++ ++void aom_paeth_predictor_8x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_8x32 aom_paeth_predictor_8x32_c ++ ++void aom_paeth_predictor_8x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_8x4 aom_paeth_predictor_8x4_c ++ ++void aom_paeth_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_paeth_predictor_8x8 aom_paeth_predictor_8x8_c ++ ++void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan); ++#define 
aom_quantize_b aom_quantize_b_c ++ ++void aom_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan); ++#define aom_quantize_b_32x32 aom_quantize_b_32x32_c ++ ++void aom_quantize_b_64x64_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan); ++#define aom_quantize_b_64x64 aom_quantize_b_64x64_c ++ ++unsigned int aom_sad128x128_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad128x128 aom_sad128x128_c ++ ++unsigned int aom_sad128x128_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad128x128_avg aom_sad128x128_avg_c ++ ++void aom_sad128x128x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad128x128x3d aom_sad128x128x3d_c ++ ++void aom_sad128x128x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad128x128x4d aom_sad128x128x4d_c ++ ++unsigned int aom_sad128x64_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad128x64 aom_sad128x64_c ++ ++unsigned int aom_sad128x64_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad128x64_avg aom_sad128x64_avg_c ++ ++void aom_sad128x64x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int 
ref_stride, uint32_t sad_array[4]); ++#define aom_sad128x64x3d aom_sad128x64x3d_c ++ ++void aom_sad128x64x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad128x64x4d aom_sad128x64x4d_c ++ ++unsigned int aom_sad16x16_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad16x16 aom_sad16x16_c ++ ++unsigned int aom_sad16x16_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad16x16_avg aom_sad16x16_avg_c ++ ++void aom_sad16x16x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad16x16x3d aom_sad16x16x3d_c ++ ++void aom_sad16x16x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad16x16x4d aom_sad16x16x4d_c ++ ++unsigned int aom_sad16x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad16x32 aom_sad16x32_c ++ ++unsigned int aom_sad16x32_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad16x32_avg aom_sad16x32_avg_c ++ ++void aom_sad16x32x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad16x32x3d aom_sad16x32x3d_c ++ ++void aom_sad16x32x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad16x32x4d aom_sad16x32x4d_c ++ ++unsigned int aom_sad16x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad16x8 aom_sad16x8_c ++ ++unsigned int aom_sad16x8_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); 
++#define aom_sad16x8_avg aom_sad16x8_avg_c ++ ++void aom_sad16x8x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad16x8x3d aom_sad16x8x3d_c ++ ++void aom_sad16x8x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad16x8x4d aom_sad16x8x4d_c ++ ++unsigned int aom_sad32x16_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad32x16 aom_sad32x16_c ++ ++unsigned int aom_sad32x16_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad32x16_avg aom_sad32x16_avg_c ++ ++void aom_sad32x16x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad32x16x3d aom_sad32x16x3d_c ++ ++void aom_sad32x16x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad32x16x4d aom_sad32x16x4d_c ++ ++unsigned int aom_sad32x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad32x32 aom_sad32x32_c ++ ++unsigned int aom_sad32x32_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad32x32_avg aom_sad32x32_avg_c ++ ++void aom_sad32x32x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad32x32x3d aom_sad32x32x3d_c ++ ++void aom_sad32x32x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad32x32x4d aom_sad32x32x4d_c ++ ++unsigned int aom_sad32x64_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad32x64 aom_sad32x64_c ++ 
++unsigned int aom_sad32x64_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad32x64_avg aom_sad32x64_avg_c ++ ++void aom_sad32x64x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad32x64x3d aom_sad32x64x3d_c ++ ++void aom_sad32x64x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad32x64x4d aom_sad32x64x4d_c ++ ++unsigned int aom_sad4x4_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad4x4 aom_sad4x4_c ++ ++unsigned int aom_sad4x4_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad4x4_avg aom_sad4x4_avg_c ++ ++void aom_sad4x4x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad4x4x3d aom_sad4x4x3d_c ++ ++void aom_sad4x4x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad4x4x4d aom_sad4x4x4d_c ++ ++unsigned int aom_sad4x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad4x8 aom_sad4x8_c ++ ++unsigned int aom_sad4x8_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad4x8_avg aom_sad4x8_avg_c ++ ++void aom_sad4x8x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad4x8x3d aom_sad4x8x3d_c ++ ++void aom_sad4x8x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad4x8x4d aom_sad4x8x4d_c ++ ++unsigned int aom_sad64x128_c(const uint8_t 
*src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad64x128 aom_sad64x128_c ++ ++unsigned int aom_sad64x128_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad64x128_avg aom_sad64x128_avg_c ++ ++void aom_sad64x128x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad64x128x3d aom_sad64x128x3d_c ++ ++void aom_sad64x128x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad64x128x4d aom_sad64x128x4d_c ++ ++unsigned int aom_sad64x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad64x32 aom_sad64x32_c ++ ++unsigned int aom_sad64x32_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad64x32_avg aom_sad64x32_avg_c ++ ++void aom_sad64x32x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad64x32x3d aom_sad64x32x3d_c ++ ++void aom_sad64x32x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad64x32x4d aom_sad64x32x4d_c ++ ++unsigned int aom_sad64x64_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad64x64 aom_sad64x64_c ++ ++unsigned int aom_sad64x64_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad64x64_avg aom_sad64x64_avg_c ++ ++void aom_sad64x64x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad64x64x3d aom_sad64x64x3d_c ++ ++void aom_sad64x64x4d_c(const uint8_t *src_ptr, int src_stride, const 
uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad64x64x4d aom_sad64x64x4d_c ++ ++unsigned int aom_sad8x16_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad8x16 aom_sad8x16_c ++ ++unsigned int aom_sad8x16_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad8x16_avg aom_sad8x16_avg_c ++ ++void aom_sad8x16x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad8x16x3d aom_sad8x16x3d_c ++ ++void aom_sad8x16x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad8x16x4d aom_sad8x16x4d_c ++ ++unsigned int aom_sad8x4_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad8x4 aom_sad8x4_c ++ ++unsigned int aom_sad8x4_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad8x4_avg aom_sad8x4_avg_c ++ ++void aom_sad8x4x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad8x4x3d aom_sad8x4x3d_c ++ ++void aom_sad8x4x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad8x4x4d aom_sad8x4x4d_c ++ ++unsigned int aom_sad8x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad8x8 aom_sad8x8_c ++ ++unsigned int aom_sad8x8_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); ++#define aom_sad8x8_avg aom_sad8x8_avg_c ++ ++void aom_sad8x8x3d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define 
aom_sad8x8x3d aom_sad8x8x3d_c ++ ++void aom_sad8x8x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad8x8x4d aom_sad8x8x4d_c ++ ++unsigned int aom_sad_skip_128x128_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_128x128 aom_sad_skip_128x128_c ++ ++void aom_sad_skip_128x128x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_128x128x4d aom_sad_skip_128x128x4d_c ++ ++unsigned int aom_sad_skip_128x64_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_128x64 aom_sad_skip_128x64_c ++ ++void aom_sad_skip_128x64x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_128x64x4d aom_sad_skip_128x64x4d_c ++ ++unsigned int aom_sad_skip_16x16_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_16x16 aom_sad_skip_16x16_c ++ ++void aom_sad_skip_16x16x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_16x16x4d aom_sad_skip_16x16x4d_c ++ ++unsigned int aom_sad_skip_16x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_16x32 aom_sad_skip_16x32_c ++ ++void aom_sad_skip_16x32x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_16x32x4d aom_sad_skip_16x32x4d_c ++ ++unsigned int aom_sad_skip_16x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_16x8 aom_sad_skip_16x8_c ++ ++void aom_sad_skip_16x8x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], 
int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_16x8x4d aom_sad_skip_16x8x4d_c ++ ++unsigned int aom_sad_skip_32x16_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_32x16 aom_sad_skip_32x16_c ++ ++void aom_sad_skip_32x16x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_32x16x4d aom_sad_skip_32x16x4d_c ++ ++unsigned int aom_sad_skip_32x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_32x32 aom_sad_skip_32x32_c ++ ++void aom_sad_skip_32x32x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_32x32x4d aom_sad_skip_32x32x4d_c ++ ++unsigned int aom_sad_skip_32x64_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_32x64 aom_sad_skip_32x64_c ++ ++void aom_sad_skip_32x64x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_32x64x4d aom_sad_skip_32x64x4d_c ++ ++unsigned int aom_sad_skip_4x4_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_4x4 aom_sad_skip_4x4_c ++ ++void aom_sad_skip_4x4x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_4x4x4d aom_sad_skip_4x4x4d_c ++ ++unsigned int aom_sad_skip_4x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_4x8 aom_sad_skip_4x8_c ++ ++void aom_sad_skip_4x8x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_4x8x4d aom_sad_skip_4x8x4d_c ++ ++unsigned int aom_sad_skip_64x128_c(const uint8_t 
*src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_64x128 aom_sad_skip_64x128_c ++ ++void aom_sad_skip_64x128x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_64x128x4d aom_sad_skip_64x128x4d_c ++ ++unsigned int aom_sad_skip_64x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_64x32 aom_sad_skip_64x32_c ++ ++void aom_sad_skip_64x32x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_64x32x4d aom_sad_skip_64x32x4d_c ++ ++unsigned int aom_sad_skip_64x64_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_64x64 aom_sad_skip_64x64_c ++ ++void aom_sad_skip_64x64x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_64x64x4d aom_sad_skip_64x64x4d_c ++ ++unsigned int aom_sad_skip_8x16_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_8x16 aom_sad_skip_8x16_c ++ ++void aom_sad_skip_8x16x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_8x16x4d aom_sad_skip_8x16x4d_c ++ ++unsigned int aom_sad_skip_8x4_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_8x4 aom_sad_skip_8x4_c ++ ++void aom_sad_skip_8x4x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_8x4x4d aom_sad_skip_8x4x4d_c ++ ++unsigned int aom_sad_skip_8x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); ++#define aom_sad_skip_8x8 aom_sad_skip_8x8_c ++ ++void 
aom_sad_skip_8x8x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[4], int ref_stride, uint32_t sad_array[4]); ++#define aom_sad_skip_8x8x4d aom_sad_skip_8x8x4d_c ++ ++int aom_satd_c(const tran_low_t *coeff, int length); ++#define aom_satd aom_satd_c ++ ++int aom_satd_lp_c(const int16_t *coeff, int length); ++#define aom_satd_lp aom_satd_lp_c ++ ++void aom_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h); ++#define aom_scaled_2d aom_scaled_2d_c ++ ++void aom_smooth_h_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_16x16 aom_smooth_h_predictor_16x16_c ++ ++void aom_smooth_h_predictor_16x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_16x32 aom_smooth_h_predictor_16x32_c ++ ++void aom_smooth_h_predictor_16x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_16x4 aom_smooth_h_predictor_16x4_c ++ ++void aom_smooth_h_predictor_16x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_16x64 aom_smooth_h_predictor_16x64_c ++ ++void aom_smooth_h_predictor_16x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_16x8 aom_smooth_h_predictor_16x8_c ++ ++void aom_smooth_h_predictor_32x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_32x16 aom_smooth_h_predictor_32x16_c ++ ++void aom_smooth_h_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_32x32 aom_smooth_h_predictor_32x32_c ++ ++void aom_smooth_h_predictor_32x64_c(uint8_t *dst, ptrdiff_t 
y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_32x64 aom_smooth_h_predictor_32x64_c ++ ++void aom_smooth_h_predictor_32x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_32x8 aom_smooth_h_predictor_32x8_c ++ ++void aom_smooth_h_predictor_4x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_4x16 aom_smooth_h_predictor_4x16_c ++ ++void aom_smooth_h_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_4x4 aom_smooth_h_predictor_4x4_c ++ ++void aom_smooth_h_predictor_4x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_4x8 aom_smooth_h_predictor_4x8_c ++ ++void aom_smooth_h_predictor_64x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_64x16 aom_smooth_h_predictor_64x16_c ++ ++void aom_smooth_h_predictor_64x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_64x32 aom_smooth_h_predictor_64x32_c ++ ++void aom_smooth_h_predictor_64x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_64x64 aom_smooth_h_predictor_64x64_c ++ ++void aom_smooth_h_predictor_8x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_8x16 aom_smooth_h_predictor_8x16_c ++ ++void aom_smooth_h_predictor_8x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_8x32 aom_smooth_h_predictor_8x32_c ++ ++void aom_smooth_h_predictor_8x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_8x4 aom_smooth_h_predictor_8x4_c ++ ++void 
aom_smooth_h_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_h_predictor_8x8 aom_smooth_h_predictor_8x8_c ++ ++void aom_smooth_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_16x16 aom_smooth_predictor_16x16_c ++ ++void aom_smooth_predictor_16x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_16x32 aom_smooth_predictor_16x32_c ++ ++void aom_smooth_predictor_16x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_16x4 aom_smooth_predictor_16x4_c ++ ++void aom_smooth_predictor_16x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_16x64 aom_smooth_predictor_16x64_c ++ ++void aom_smooth_predictor_16x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_16x8 aom_smooth_predictor_16x8_c ++ ++void aom_smooth_predictor_32x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_32x16 aom_smooth_predictor_32x16_c ++ ++void aom_smooth_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_32x32 aom_smooth_predictor_32x32_c ++ ++void aom_smooth_predictor_32x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_32x64 aom_smooth_predictor_32x64_c ++ ++void aom_smooth_predictor_32x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_32x8 aom_smooth_predictor_32x8_c ++ ++void aom_smooth_predictor_4x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_4x16 aom_smooth_predictor_4x16_c ++ 
++void aom_smooth_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_4x4 aom_smooth_predictor_4x4_c ++ ++void aom_smooth_predictor_4x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_4x8 aom_smooth_predictor_4x8_c ++ ++void aom_smooth_predictor_64x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_64x16 aom_smooth_predictor_64x16_c ++ ++void aom_smooth_predictor_64x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_64x32 aom_smooth_predictor_64x32_c ++ ++void aom_smooth_predictor_64x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_64x64 aom_smooth_predictor_64x64_c ++ ++void aom_smooth_predictor_8x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_8x16 aom_smooth_predictor_8x16_c ++ ++void aom_smooth_predictor_8x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_8x32 aom_smooth_predictor_8x32_c ++ ++void aom_smooth_predictor_8x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_8x4 aom_smooth_predictor_8x4_c ++ ++void aom_smooth_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_predictor_8x8 aom_smooth_predictor_8x8_c ++ ++void aom_smooth_v_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_16x16 aom_smooth_v_predictor_16x16_c ++ ++void aom_smooth_v_predictor_16x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_16x32 aom_smooth_v_predictor_16x32_c ++ 
++void aom_smooth_v_predictor_16x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_16x4 aom_smooth_v_predictor_16x4_c ++ ++void aom_smooth_v_predictor_16x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_16x64 aom_smooth_v_predictor_16x64_c ++ ++void aom_smooth_v_predictor_16x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_16x8 aom_smooth_v_predictor_16x8_c ++ ++void aom_smooth_v_predictor_32x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_32x16 aom_smooth_v_predictor_32x16_c ++ ++void aom_smooth_v_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_32x32 aom_smooth_v_predictor_32x32_c ++ ++void aom_smooth_v_predictor_32x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_32x64 aom_smooth_v_predictor_32x64_c ++ ++void aom_smooth_v_predictor_32x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_32x8 aom_smooth_v_predictor_32x8_c ++ ++void aom_smooth_v_predictor_4x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_4x16 aom_smooth_v_predictor_4x16_c ++ ++void aom_smooth_v_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_4x4 aom_smooth_v_predictor_4x4_c ++ ++void aom_smooth_v_predictor_4x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_4x8 aom_smooth_v_predictor_4x8_c ++ ++void aom_smooth_v_predictor_64x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define 
aom_smooth_v_predictor_64x16 aom_smooth_v_predictor_64x16_c ++ ++void aom_smooth_v_predictor_64x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_64x32 aom_smooth_v_predictor_64x32_c ++ ++void aom_smooth_v_predictor_64x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_64x64 aom_smooth_v_predictor_64x64_c ++ ++void aom_smooth_v_predictor_8x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_8x16 aom_smooth_v_predictor_8x16_c ++ ++void aom_smooth_v_predictor_8x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_8x32 aom_smooth_v_predictor_8x32_c ++ ++void aom_smooth_v_predictor_8x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_8x4 aom_smooth_v_predictor_8x4_c ++ ++void aom_smooth_v_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_smooth_v_predictor_8x8 aom_smooth_v_predictor_8x8_c ++ ++int64_t aom_sse_c(const uint8_t *a, int a_stride, const uint8_t *b,int b_stride, int width, int height); ++#define aom_sse aom_sse_c ++ ++void aom_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr); ++#define aom_ssim_parms_8x8 aom_ssim_parms_8x8_c ++ ++uint32_t aom_sub_pixel_avg_variance128x128_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance128x128 aom_sub_pixel_avg_variance128x128_c ++ ++uint32_t aom_sub_pixel_avg_variance128x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, 
const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance128x64 aom_sub_pixel_avg_variance128x64_c ++ ++uint32_t aom_sub_pixel_avg_variance16x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance16x16 aom_sub_pixel_avg_variance16x16_c ++ ++uint32_t aom_sub_pixel_avg_variance16x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance16x32 aom_sub_pixel_avg_variance16x32_c ++ ++uint32_t aom_sub_pixel_avg_variance16x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance16x8 aom_sub_pixel_avg_variance16x8_c ++ ++uint32_t aom_sub_pixel_avg_variance32x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance32x16 aom_sub_pixel_avg_variance32x16_c ++ ++uint32_t aom_sub_pixel_avg_variance32x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance32x32 aom_sub_pixel_avg_variance32x32_c ++ ++uint32_t aom_sub_pixel_avg_variance32x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance32x64 aom_sub_pixel_avg_variance32x64_c ++ ++uint32_t aom_sub_pixel_avg_variance4x4_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define 
aom_sub_pixel_avg_variance4x4 aom_sub_pixel_avg_variance4x4_c ++ ++uint32_t aom_sub_pixel_avg_variance4x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance4x8 aom_sub_pixel_avg_variance4x8_c ++ ++uint32_t aom_sub_pixel_avg_variance64x128_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance64x128 aom_sub_pixel_avg_variance64x128_c ++ ++uint32_t aom_sub_pixel_avg_variance64x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance64x32 aom_sub_pixel_avg_variance64x32_c ++ ++uint32_t aom_sub_pixel_avg_variance64x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance64x64 aom_sub_pixel_avg_variance64x64_c ++ ++uint32_t aom_sub_pixel_avg_variance8x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance8x16 aom_sub_pixel_avg_variance8x16_c ++ ++uint32_t aom_sub_pixel_avg_variance8x4_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance8x4 aom_sub_pixel_avg_variance8x4_c ++ ++uint32_t aom_sub_pixel_avg_variance8x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); ++#define aom_sub_pixel_avg_variance8x8 aom_sub_pixel_avg_variance8x8_c ++ ++uint32_t 
aom_sub_pixel_variance128x128_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance128x128 aom_sub_pixel_variance128x128_c ++ ++uint32_t aom_sub_pixel_variance128x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance128x64 aom_sub_pixel_variance128x64_c ++ ++uint32_t aom_sub_pixel_variance16x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance16x16 aom_sub_pixel_variance16x16_c ++ ++uint32_t aom_sub_pixel_variance16x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance16x32 aom_sub_pixel_variance16x32_c ++ ++uint32_t aom_sub_pixel_variance16x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance16x8 aom_sub_pixel_variance16x8_c ++ ++uint32_t aom_sub_pixel_variance32x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance32x16 aom_sub_pixel_variance32x16_c ++ ++uint32_t aom_sub_pixel_variance32x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance32x32 aom_sub_pixel_variance32x32_c ++ ++uint32_t aom_sub_pixel_variance32x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance32x64 aom_sub_pixel_variance32x64_c ++ ++uint32_t aom_sub_pixel_variance4x4_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, 
const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance4x4 aom_sub_pixel_variance4x4_c ++ ++uint32_t aom_sub_pixel_variance4x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance4x8 aom_sub_pixel_variance4x8_c ++ ++uint32_t aom_sub_pixel_variance64x128_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance64x128 aom_sub_pixel_variance64x128_c ++ ++uint32_t aom_sub_pixel_variance64x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance64x32 aom_sub_pixel_variance64x32_c ++ ++uint32_t aom_sub_pixel_variance64x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance64x64 aom_sub_pixel_variance64x64_c ++ ++uint32_t aom_sub_pixel_variance8x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance8x16 aom_sub_pixel_variance8x16_c ++ ++uint32_t aom_sub_pixel_variance8x4_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance8x4 aom_sub_pixel_variance8x4_c ++ ++uint32_t aom_sub_pixel_variance8x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); ++#define aom_sub_pixel_variance8x8 aom_sub_pixel_variance8x8_c ++ ++void aom_subtract_block_c(int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride); ++#define aom_subtract_block aom_subtract_block_c 
++ ++uint64_t aom_sum_squares_2d_i16_c(const int16_t *src, int stride, int width, int height); ++#define aom_sum_squares_2d_i16 aom_sum_squares_2d_i16_c ++ ++uint64_t aom_sum_squares_i16_c(const int16_t *src, uint32_t N); ++#define aom_sum_squares_i16 aom_sum_squares_i16_c ++ ++uint64_t aom_sum_sse_2d_i16_c(const int16_t *src, int src_stride, int width, int height, int *sum); ++#define aom_sum_sse_2d_i16 aom_sum_sse_2d_i16_c ++ ++void aom_v_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_16x16 aom_v_predictor_16x16_c ++ ++void aom_v_predictor_16x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_16x32 aom_v_predictor_16x32_c ++ ++void aom_v_predictor_16x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_16x4 aom_v_predictor_16x4_c ++ ++void aom_v_predictor_16x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_16x64 aom_v_predictor_16x64_c ++ ++void aom_v_predictor_16x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_16x8 aom_v_predictor_16x8_c ++ ++void aom_v_predictor_32x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_32x16 aom_v_predictor_32x16_c ++ ++void aom_v_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_32x32 aom_v_predictor_32x32_c ++ ++void aom_v_predictor_32x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_32x64 aom_v_predictor_32x64_c ++ ++void aom_v_predictor_32x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_32x8 aom_v_predictor_32x8_c ++ ++void aom_v_predictor_4x16_c(uint8_t *dst, ptrdiff_t y_stride, 
const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_4x16 aom_v_predictor_4x16_c ++ ++void aom_v_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_4x4 aom_v_predictor_4x4_c ++ ++void aom_v_predictor_4x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_4x8 aom_v_predictor_4x8_c ++ ++void aom_v_predictor_64x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_64x16 aom_v_predictor_64x16_c ++ ++void aom_v_predictor_64x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_64x32 aom_v_predictor_64x32_c ++ ++void aom_v_predictor_64x64_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_64x64 aom_v_predictor_64x64_c ++ ++void aom_v_predictor_8x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_8x16 aom_v_predictor_8x16_c ++ ++void aom_v_predictor_8x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_8x32 aom_v_predictor_8x32_c ++ ++void aom_v_predictor_8x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_8x4 aom_v_predictor_8x4_c ++ ++void aom_v_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); ++#define aom_v_predictor_8x8 aom_v_predictor_8x8_c ++ ++uint64_t aom_var_2d_u16_c(uint8_t *src, int src_stride, int width, int height); ++#define aom_var_2d_u16 aom_var_2d_u16_c ++ ++uint64_t aom_var_2d_u8_c(uint8_t *src, int src_stride, int width, int height); ++#define aom_var_2d_u8 aom_var_2d_u8_c ++ ++unsigned int aom_variance128x128_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define 
aom_variance128x128 aom_variance128x128_c ++ ++unsigned int aom_variance128x64_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance128x64 aom_variance128x64_c ++ ++unsigned int aom_variance16x16_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance16x16 aom_variance16x16_c ++ ++unsigned int aom_variance16x32_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance16x32 aom_variance16x32_c ++ ++unsigned int aom_variance16x8_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance16x8 aom_variance16x8_c ++ ++unsigned int aom_variance32x16_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance32x16 aom_variance32x16_c ++ ++unsigned int aom_variance32x32_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance32x32 aom_variance32x32_c ++ ++unsigned int aom_variance32x64_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance32x64 aom_variance32x64_c ++ ++unsigned int aom_variance4x4_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance4x4 aom_variance4x4_c ++ ++unsigned int aom_variance4x8_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance4x8 aom_variance4x8_c ++ ++unsigned int aom_variance64x128_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance64x128 aom_variance64x128_c ++ ++unsigned int aom_variance64x32_c(const uint8_t *src_ptr, int 
source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance64x32 aom_variance64x32_c ++ ++unsigned int aom_variance64x64_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance64x64 aom_variance64x64_c ++ ++unsigned int aom_variance8x16_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance8x16 aom_variance8x16_c ++ ++unsigned int aom_variance8x4_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance8x4 aom_variance8x4_c ++ ++unsigned int aom_variance8x8_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); ++#define aom_variance8x8 aom_variance8x8_c ++ ++int aom_vector_var_c(const int16_t *ref, const int16_t *src, int bwl); ++#define aom_vector_var aom_vector_var_c ++ ++void aom_dsp_rtcd(void); ++ ++#include "config/aom_config.h" ++ ++#ifdef RTCD_C ++#include "aom_ports/ppc.h" ++static void setup_rtcd_internal(void) ++{ ++ int flags = ppc_simd_caps(); ++ ++ (void)flags; ++ ++} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +Index: chromium-121.0.6167.75/third_party/libaom/source/config/linux/ppc64/config/aom_scale_rtcd.h +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libaom/source/config/linux/ppc64/config/aom_scale_rtcd.h +@@ -0,0 +1,105 @@ ++// This file is generated. Do not edit. 
++#ifndef AOM_SCALE_RTCD_H_ ++#define AOM_SCALE_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++struct yv12_buffer_config; ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++void aom_extend_frame_borders_c(struct yv12_buffer_config *ybf, const int num_planes); ++#define aom_extend_frame_borders aom_extend_frame_borders_c ++ ++void aom_extend_frame_borders_plane_row_c(const struct yv12_buffer_config *ybf, int plane, int v_start, int v_end); ++#define aom_extend_frame_borders_plane_row aom_extend_frame_borders_plane_row_c ++ ++void aom_extend_frame_borders_y_c(struct yv12_buffer_config *ybf); ++#define aom_extend_frame_borders_y aom_extend_frame_borders_y_c ++ ++void aom_extend_frame_inner_borders_c(struct yv12_buffer_config *ybf, const int num_planes); ++#define aom_extend_frame_inner_borders aom_extend_frame_inner_borders_c ++ ++void aom_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width); ++#define aom_horizontal_line_2_1_scale aom_horizontal_line_2_1_scale_c ++ ++void aom_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width); ++#define aom_horizontal_line_5_3_scale aom_horizontal_line_5_3_scale_c ++ ++void aom_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width); ++#define aom_horizontal_line_5_4_scale aom_horizontal_line_5_4_scale_c ++ ++void aom_vertical_band_2_1_scale_c(unsigned char *source, int src_pitch, unsigned char *dest, int dest_pitch, unsigned int dest_width); ++#define aom_vertical_band_2_1_scale aom_vertical_band_2_1_scale_c ++ ++void aom_vertical_band_2_1_scale_i_c(unsigned char *source, int src_pitch, unsigned char *dest, int dest_pitch, unsigned int dest_width); ++#define aom_vertical_band_2_1_scale_i aom_vertical_band_2_1_scale_i_c ++ ++void 
aom_vertical_band_5_3_scale_c(unsigned char *source, int src_pitch, unsigned char *dest, int dest_pitch, unsigned int dest_width); ++#define aom_vertical_band_5_3_scale aom_vertical_band_5_3_scale_c ++ ++void aom_vertical_band_5_4_scale_c(unsigned char *source, int src_pitch, unsigned char *dest, int dest_pitch, unsigned int dest_width); ++#define aom_vertical_band_5_4_scale aom_vertical_band_5_4_scale_c ++ ++void aom_yv12_copy_frame_c(const struct yv12_buffer_config *src_bc, struct yv12_buffer_config *dst_bc, const int num_planes); ++#define aom_yv12_copy_frame aom_yv12_copy_frame_c ++ ++void aom_yv12_copy_u_c(const struct yv12_buffer_config *src_bc, struct yv12_buffer_config *dst_bc); ++#define aom_yv12_copy_u aom_yv12_copy_u_c ++ ++void aom_yv12_copy_v_c(const struct yv12_buffer_config *src_bc, struct yv12_buffer_config *dst_bc); ++#define aom_yv12_copy_v aom_yv12_copy_v_c ++ ++void aom_yv12_copy_y_c(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc); ++#define aom_yv12_copy_y aom_yv12_copy_y_c ++ ++void aom_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf, const int num_planes); ++#define aom_yv12_extend_frame_borders aom_yv12_extend_frame_borders_c ++ ++void aom_yv12_partial_coloc_copy_u_c(const struct yv12_buffer_config *src_bc, struct yv12_buffer_config *dst_bc, int hstart, int hend, int vstart, int vend); ++#define aom_yv12_partial_coloc_copy_u aom_yv12_partial_coloc_copy_u_c ++ ++void aom_yv12_partial_coloc_copy_v_c(const struct yv12_buffer_config *src_bc, struct yv12_buffer_config *dst_bc, int hstart, int hend, int vstart, int vend); ++#define aom_yv12_partial_coloc_copy_v aom_yv12_partial_coloc_copy_v_c ++ ++void aom_yv12_partial_coloc_copy_y_c(const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc, int hstart, int hend, int vstart, int vend); ++#define aom_yv12_partial_coloc_copy_y aom_yv12_partial_coloc_copy_y_c ++ ++void aom_yv12_partial_copy_u_c(const struct yv12_buffer_config *src_bc, int 
hstart1, int hend1, int vstart1, int vend1, struct yv12_buffer_config *dst_bc, int hstart2, int vstart2); ++#define aom_yv12_partial_copy_u aom_yv12_partial_copy_u_c ++ ++void aom_yv12_partial_copy_v_c(const struct yv12_buffer_config *src_bc, int hstart1, int hend1, int vstart1, int vend1, struct yv12_buffer_config *dst_bc, int hstart2, int vstart2); ++#define aom_yv12_partial_copy_v aom_yv12_partial_copy_v_c ++ ++void aom_yv12_partial_copy_y_c(const struct yv12_buffer_config *src_ybc, int hstart1, int hend1, int vstart1, int vend1, struct yv12_buffer_config *dst_ybc, int hstart2, int vstart2); ++#define aom_yv12_partial_copy_y aom_yv12_partial_copy_y_c ++ ++int aom_yv12_realloc_with_new_border_c(struct yv12_buffer_config *ybf, int new_border, int byte_alignment, int num_pyramid_levels, int num_planes); ++#define aom_yv12_realloc_with_new_border aom_yv12_realloc_with_new_border_c ++ ++void aom_scale_rtcd(void); ++ ++#include "config/aom_config.h" ++ ++#ifdef RTCD_C ++#include "aom_ports/ppc.h" ++static void setup_rtcd_internal(void) ++{ ++ int flags = ppc_simd_caps(); ++ ++ (void)flags; ++ ++} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +Index: chromium-121.0.6167.75/third_party/libaom/source/config/linux/ppc64/config/av1_rtcd.h +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libaom/source/config/linux/ppc64/config/av1_rtcd.h +@@ -0,0 +1,478 @@ ++// This file is generated. Do not edit. 
++#ifndef AV1_RTCD_H_ ++#define AV1_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++/* ++ * AV1 ++ */ ++ ++#include "aom/aom_integer.h" ++#include "aom_dsp/odintrin.h" ++#include "aom_dsp/txfm_common.h" ++#include "av1/common/av1_txfm.h" ++#include "av1/common/common.h" ++#include "av1/common/convolve.h" ++#include "av1/common/enums.h" ++#include "av1/common/filter.h" ++#include "av1/common/quant_common.h" ++#include "av1/common/restoration.h" ++ ++struct macroblockd; ++ ++/* Encoder forward decls */ ++struct macroblock; ++struct txfm_param; ++struct aom_variance_vtable; ++struct search_site_config; ++struct yv12_buffer_config; ++struct NN_CONFIG; ++typedef struct NN_CONFIG NN_CONFIG; ++ ++enum { NONE, RELU, SOFTSIGN, SIGMOID } UENUM1BYTE(ACTIVATION); ++#if CONFIG_NN_V2 ++enum { SOFTMAX_CROSS_ENTROPY } UENUM1BYTE(LOSS); ++struct NN_CONFIG_V2; ++typedef struct NN_CONFIG_V2 NN_CONFIG_V2; ++struct FC_LAYER; ++typedef struct FC_LAYER FC_LAYER; ++#endif // CONFIG_NN_V2 ++ ++struct CNN_CONFIG; ++typedef struct CNN_CONFIG CNN_CONFIG; ++struct CNN_LAYER_CONFIG; ++typedef struct CNN_LAYER_CONFIG CNN_LAYER_CONFIG; ++struct CNN_THREAD_DATA; ++typedef struct CNN_THREAD_DATA CNN_THREAD_DATA; ++struct CNN_BRANCH_CONFIG; ++typedef struct CNN_BRANCH_CONFIG CNN_BRANCH_CONFIG; ++struct CNN_MULTI_OUT; ++typedef struct CNN_MULTI_OUT CNN_MULTI_OUT; ++ ++/* Function pointers return by CfL functions */ ++typedef void (*cfl_subsample_lbd_fn)(const uint8_t *input, int input_stride, ++ uint16_t *output_q3); ++ ++#if CONFIG_AV1_HIGHBITDEPTH ++typedef void (*cfl_subsample_hbd_fn)(const uint16_t *input, int input_stride, ++ uint16_t *output_q3); ++ ++typedef void (*cfl_predict_hbd_fn)(const int16_t *src, uint16_t *dst, ++ int dst_stride, int alpha_q3, int bd); ++#endif ++ ++typedef void (*cfl_subtract_average_fn)(const uint16_t *src, int16_t *dst); ++ ++typedef void (*cfl_predict_lbd_fn)(const int16_t *src, uint8_t *dst, ++ int dst_stride, 
int alpha_q3); ++ ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++void aom_comp_avg_upsampled_pred_c(MACROBLOCKD *xd, const struct AV1Common *const cm, int mi_row, int mi_col, ++ const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width, ++ int height, int subpel_x_q3, int subpel_y_q3, const uint8_t *ref, ++ int ref_stride, int subpel_search); ++#define aom_comp_avg_upsampled_pred aom_comp_avg_upsampled_pred_c ++ ++void aom_dist_wtd_comp_avg_upsampled_pred_c(MACROBLOCKD *xd, const struct AV1Common *const cm, int mi_row, int mi_col, ++ const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width, ++ int height, int subpel_x_q3, int subpel_y_q3, const uint8_t *ref, ++ int ref_stride, const DIST_WTD_COMP_PARAMS *jcp_param, int subpel_search); ++#define aom_dist_wtd_comp_avg_upsampled_pred aom_dist_wtd_comp_avg_upsampled_pred_c ++ ++void aom_quantize_b_helper_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr, const int log_scale); ++#define aom_quantize_b_helper aom_quantize_b_helper_c ++ ++void aom_upsampled_pred_c(MACROBLOCKD *xd, const struct AV1Common *const cm, int mi_row, int mi_col, ++ const MV *const mv, uint8_t *comp_pred, int width, int height, int subpel_x_q3, ++ int subpel_y_q3, const uint8_t *ref, int ref_stride, int subpel_search); ++#define aom_upsampled_pred aom_upsampled_pred_c ++ ++int av1_apply_selfguided_restoration_c(const uint8_t *dat, int width, int height, int stride, int eps, const int *xqd, uint8_t *dst, int dst_stride, int32_t *tmpbuf, int bit_depth, int highbd); ++#define av1_apply_selfguided_restoration av1_apply_selfguided_restoration_c ++ ++int64_t av1_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff, 
intptr_t block_size, int64_t *ssz); ++#define av1_block_error av1_block_error_c ++ ++int64_t av1_block_error_lp_c(const int16_t *coeff, const int16_t *dqcoeff, intptr_t block_size); ++#define av1_block_error_lp av1_block_error_lp_c ++ ++void av1_build_compound_diffwtd_mask_c(uint8_t *mask, DIFFWTD_MASK_TYPE mask_type, const uint8_t *src0, int src0_stride, const uint8_t *src1, int src1_stride, int h, int w); ++#define av1_build_compound_diffwtd_mask av1_build_compound_diffwtd_mask_c ++ ++void av1_build_compound_diffwtd_mask_d16_c(uint8_t *mask, DIFFWTD_MASK_TYPE mask_type, const CONV_BUF_TYPE *src0, int src0_stride, const CONV_BUF_TYPE *src1, int src1_stride, int h, int w, ConvolveParams *conv_params, int bd); ++#define av1_build_compound_diffwtd_mask_d16 av1_build_compound_diffwtd_mask_d16_c ++ ++void av1_calc_indices_dim1_c(const int16_t *data, const int16_t *centroids, uint8_t *indices, int64_t *total_dist, int n, int k); ++#define av1_calc_indices_dim1 av1_calc_indices_dim1_c ++ ++void av1_calc_indices_dim2_c(const int16_t *data, const int16_t *centroids, uint8_t *indices, int64_t *total_dist, int n, int k); ++#define av1_calc_indices_dim2 av1_calc_indices_dim2_c ++ ++void av1_convolve_2d_scale_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams *filter_params_x, const InterpFilterParams *filter_params_y, const int subpel_x_qn, const int x_step_qn, const int subpel_y_qn, const int y_step_qn, ConvolveParams *conv_params); ++#define av1_convolve_2d_scale av1_convolve_2d_scale_c ++ ++void av1_convolve_2d_sr_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams *filter_params_x, const InterpFilterParams *filter_params_y, const int subpel_x_qn, const int subpel_y_qn, ConvolveParams *conv_params); ++#define av1_convolve_2d_sr av1_convolve_2d_sr_c ++ ++void av1_convolve_2d_sr_intrabc_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int 
h, const InterpFilterParams *filter_params_x, const InterpFilterParams *filter_params_y, const int subpel_x_qn, const int subpel_y_qn, ConvolveParams *conv_params); ++#define av1_convolve_2d_sr_intrabc av1_convolve_2d_sr_intrabc_c ++ ++void av1_convolve_horiz_rs_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const int16_t *x_filters, int x0_qn, int x_step_qn); ++#define av1_convolve_horiz_rs av1_convolve_horiz_rs_c ++ ++void av1_convolve_x_sr_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, ConvolveParams *conv_params); ++#define av1_convolve_x_sr av1_convolve_x_sr_c ++ ++void av1_convolve_x_sr_intrabc_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, ConvolveParams *conv_params); ++#define av1_convolve_x_sr_intrabc av1_convolve_x_sr_intrabc_c ++ ++void av1_convolve_y_sr_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn); ++#define av1_convolve_y_sr av1_convolve_y_sr_c ++ ++void av1_convolve_y_sr_intrabc_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn); ++#define av1_convolve_y_sr_intrabc av1_convolve_y_sr_intrabc_c ++ ++int av1_denoiser_filter_c(const uint8_t *sig, int sig_stride, const uint8_t *mc_avg, int mc_avg_stride, uint8_t *avg, int avg_stride, int increase_denoising, BLOCK_SIZE bs, int motion_magnitude); ++#define av1_denoiser_filter av1_denoiser_filter_c ++ ++void av1_dist_wtd_convolve_2d_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams *filter_params_x, const InterpFilterParams *filter_params_y, const int subpel_x_qn, const int subpel_y_qn, ConvolveParams *conv_params); 
++#define av1_dist_wtd_convolve_2d av1_dist_wtd_convolve_2d_c ++ ++void av1_dist_wtd_convolve_2d_copy_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, ConvolveParams *conv_params); ++#define av1_dist_wtd_convolve_2d_copy av1_dist_wtd_convolve_2d_copy_c ++ ++void av1_dist_wtd_convolve_x_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, ConvolveParams *conv_params); ++#define av1_dist_wtd_convolve_x av1_dist_wtd_convolve_x_c ++ ++void av1_dist_wtd_convolve_y_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn, ConvolveParams *conv_params); ++#define av1_dist_wtd_convolve_y av1_dist_wtd_convolve_y_c ++ ++void av1_dr_prediction_z1_c(uint8_t *dst, ptrdiff_t stride, int bw, int bh, const uint8_t *above, const uint8_t *left, int upsample_above, int dx, int dy); ++#define av1_dr_prediction_z1 av1_dr_prediction_z1_c ++ ++void av1_dr_prediction_z2_c(uint8_t *dst, ptrdiff_t stride, int bw, int bh, const uint8_t *above, const uint8_t *left, int upsample_above, int upsample_left, int dx, int dy); ++#define av1_dr_prediction_z2 av1_dr_prediction_z2_c ++ ++void av1_dr_prediction_z3_c(uint8_t *dst, ptrdiff_t stride, int bw, int bh, const uint8_t *above, const uint8_t *left, int upsample_left, int dx, int dy); ++#define av1_dr_prediction_z3 av1_dr_prediction_z3_c ++ ++void av1_filter_intra_edge_c(uint8_t *p, int sz, int strength); ++#define av1_filter_intra_edge av1_filter_intra_edge_c ++ ++void av1_filter_intra_predictor_c(uint8_t *dst, ptrdiff_t stride, TX_SIZE tx_size, const uint8_t *above, const uint8_t *left, int mode); ++#define av1_filter_intra_predictor av1_filter_intra_predictor_c ++ ++void av1_fwd_txfm2d_16x16_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_16x16 av1_fwd_txfm2d_16x16_c ++ ++void 
av1_fwd_txfm2d_16x32_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_16x32 av1_fwd_txfm2d_16x32_c ++ ++void av1_fwd_txfm2d_16x4_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_16x4 av1_fwd_txfm2d_16x4_c ++ ++void av1_fwd_txfm2d_16x8_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_16x8 av1_fwd_txfm2d_16x8_c ++ ++void av1_fwd_txfm2d_32x16_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_32x16 av1_fwd_txfm2d_32x16_c ++ ++void av1_fwd_txfm2d_32x32_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_32x32 av1_fwd_txfm2d_32x32_c ++ ++void av1_fwd_txfm2d_32x64_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_32x64 av1_fwd_txfm2d_32x64_c ++ ++void av1_fwd_txfm2d_4x4_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_4x4 av1_fwd_txfm2d_4x4_c ++ ++void av1_fwd_txfm2d_4x8_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_4x8 av1_fwd_txfm2d_4x8_c ++ ++void av1_fwd_txfm2d_64x32_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_64x32 av1_fwd_txfm2d_64x32_c ++ ++void av1_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_64x64 av1_fwd_txfm2d_64x64_c ++ ++void av1_fwd_txfm2d_8x16_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_8x16 av1_fwd_txfm2d_8x16_c ++ ++void av1_fwd_txfm2d_8x4_c(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_8x4 av1_fwd_txfm2d_8x4_c ++ ++void av1_fwd_txfm2d_8x8_c(const int16_t *input, int32_t 
*output, int stride, TX_TYPE tx_type, int bd); ++#define av1_fwd_txfm2d_8x8 av1_fwd_txfm2d_8x8_c ++ ++void av1_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride); ++#define av1_fwht4x4 av1_fwht4x4_c ++ ++uint32_t av1_get_crc32c_value_c(void *crc_calculator, uint8_t *p, size_t length); ++#define av1_get_crc32c_value av1_get_crc32c_value_c ++ ++void av1_get_horver_correlation_full_c(const int16_t *diff, int stride, int w, int h, float *hcorr, float *vcorr); ++#define av1_get_horver_correlation_full av1_get_horver_correlation_full_c ++ ++void av1_get_nz_map_contexts_c(const uint8_t *const levels, const int16_t *const scan, const uint16_t eob, const TX_SIZE tx_size, const TX_CLASS tx_class, int8_t *const coeff_contexts); ++#define av1_get_nz_map_contexts av1_get_nz_map_contexts_c ++ ++void av1_highbd_inv_txfm_add_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add av1_highbd_inv_txfm_add_c ++ ++void av1_highbd_inv_txfm_add_16x32_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_16x32 av1_highbd_inv_txfm_add_16x32_c ++ ++void av1_highbd_inv_txfm_add_16x4_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_16x4 av1_highbd_inv_txfm_add_16x4_c ++ ++void av1_highbd_inv_txfm_add_16x64_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_16x64 av1_highbd_inv_txfm_add_16x64_c ++ ++void av1_highbd_inv_txfm_add_16x8_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_16x8 av1_highbd_inv_txfm_add_16x8_c ++ ++void av1_highbd_inv_txfm_add_32x16_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_32x16 av1_highbd_inv_txfm_add_32x16_c ++ ++void av1_highbd_inv_txfm_add_32x32_c(const 
tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_32x32 av1_highbd_inv_txfm_add_32x32_c ++ ++void av1_highbd_inv_txfm_add_32x64_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_32x64 av1_highbd_inv_txfm_add_32x64_c ++ ++void av1_highbd_inv_txfm_add_32x8_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_32x8 av1_highbd_inv_txfm_add_32x8_c ++ ++void av1_highbd_inv_txfm_add_4x16_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_4x16 av1_highbd_inv_txfm_add_4x16_c ++ ++void av1_highbd_inv_txfm_add_4x4_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_4x4 av1_highbd_inv_txfm_add_4x4_c ++ ++void av1_highbd_inv_txfm_add_4x8_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_4x8 av1_highbd_inv_txfm_add_4x8_c ++ ++void av1_highbd_inv_txfm_add_64x16_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_64x16 av1_highbd_inv_txfm_add_64x16_c ++ ++void av1_highbd_inv_txfm_add_64x32_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_64x32 av1_highbd_inv_txfm_add_64x32_c ++ ++void av1_highbd_inv_txfm_add_64x64_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_64x64 av1_highbd_inv_txfm_add_64x64_c ++ ++void av1_highbd_inv_txfm_add_8x16_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_8x16 av1_highbd_inv_txfm_add_8x16_c ++ ++void av1_highbd_inv_txfm_add_8x32_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam 
*txfm_param); ++#define av1_highbd_inv_txfm_add_8x32 av1_highbd_inv_txfm_add_8x32_c ++ ++void av1_highbd_inv_txfm_add_8x4_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_8x4 av1_highbd_inv_txfm_add_8x4_c ++ ++void av1_highbd_inv_txfm_add_8x8_c(const tran_low_t *input, uint8_t *dest, int stride, const TxfmParam *txfm_param); ++#define av1_highbd_inv_txfm_add_8x8 av1_highbd_inv_txfm_add_8x8_c ++ ++void av1_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride, int bd); ++#define av1_highbd_iwht4x4_16_add av1_highbd_iwht4x4_16_add_c ++ ++void av1_highbd_iwht4x4_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride, int bd); ++#define av1_highbd_iwht4x4_1_add av1_highbd_iwht4x4_1_add_c ++ ++void av1_inv_txfm2d_add_16x16_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_16x16 av1_inv_txfm2d_add_16x16_c ++ ++void av1_inv_txfm2d_add_16x32_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_16x32 av1_inv_txfm2d_add_16x32_c ++ ++void av1_inv_txfm2d_add_16x4_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_16x4 av1_inv_txfm2d_add_16x4_c ++ ++void av1_inv_txfm2d_add_16x64_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_16x64 av1_inv_txfm2d_add_16x64_c ++ ++void av1_inv_txfm2d_add_16x8_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_16x8 av1_inv_txfm2d_add_16x8_c ++ ++void av1_inv_txfm2d_add_32x16_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_32x16 av1_inv_txfm2d_add_32x16_c ++ ++void av1_inv_txfm2d_add_32x32_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define 
av1_inv_txfm2d_add_32x32 av1_inv_txfm2d_add_32x32_c ++ ++void av1_inv_txfm2d_add_32x64_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_32x64 av1_inv_txfm2d_add_32x64_c ++ ++void av1_inv_txfm2d_add_32x8_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_32x8 av1_inv_txfm2d_add_32x8_c ++ ++void av1_inv_txfm2d_add_4x16_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_4x16 av1_inv_txfm2d_add_4x16_c ++ ++void av1_inv_txfm2d_add_4x4_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_4x4 av1_inv_txfm2d_add_4x4_c ++ ++void av1_inv_txfm2d_add_4x8_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_4x8 av1_inv_txfm2d_add_4x8_c ++ ++void av1_inv_txfm2d_add_64x16_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_64x16 av1_inv_txfm2d_add_64x16_c ++ ++void av1_inv_txfm2d_add_64x32_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_64x32 av1_inv_txfm2d_add_64x32_c ++ ++void av1_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_64x64 av1_inv_txfm2d_add_64x64_c ++ ++void av1_inv_txfm2d_add_8x16_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_8x16 av1_inv_txfm2d_add_8x16_c ++ ++void av1_inv_txfm2d_add_8x32_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_8x32 av1_inv_txfm2d_add_8x32_c ++ ++void av1_inv_txfm2d_add_8x4_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_8x4 av1_inv_txfm2d_add_8x4_c ++ ++void 
av1_inv_txfm2d_add_8x8_c(const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd); ++#define av1_inv_txfm2d_add_8x8 av1_inv_txfm2d_add_8x8_c ++ ++void av1_inv_txfm_add_c(const tran_low_t *dqcoeff, uint8_t *dst, int stride, const TxfmParam *txfm_param); ++#define av1_inv_txfm_add av1_inv_txfm_add_c ++ ++void av1_lowbd_fwd_txfm_c(const int16_t *src_diff, tran_low_t *coeff, int diff_stride, TxfmParam *txfm_param); ++#define av1_lowbd_fwd_txfm av1_lowbd_fwd_txfm_c ++ ++void av1_nn_fast_softmax_16_c(const float *input_nodes, float *output); ++#define av1_nn_fast_softmax_16 av1_nn_fast_softmax_16_c ++ ++void av1_nn_predict_c(const float *input_nodes, const NN_CONFIG *const nn_config, int reduce_prec, float *const output); ++#define av1_nn_predict av1_nn_predict_c ++ ++void av1_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr, int log_scale); ++#define av1_quantize_b av1_quantize_b_c ++ ++void av1_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan); ++#define av1_quantize_fp av1_quantize_fp_c ++ ++void av1_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan); ++#define av1_quantize_fp_32x32 av1_quantize_fp_32x32_c ++ ++void 
av1_quantize_fp_64x64_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan); ++#define av1_quantize_fp_64x64 av1_quantize_fp_64x64_c ++ ++void av1_quantize_lp_c(const int16_t *coeff_ptr, intptr_t n_coeffs, const int16_t *round_ptr, const int16_t *quant_ptr, int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan); ++#define av1_quantize_lp av1_quantize_lp_c ++ ++void av1_resize_and_extend_frame_c(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, const InterpFilter filter, const int phase, const int num_planes); ++#define av1_resize_and_extend_frame av1_resize_and_extend_frame_c ++ ++void av1_round_shift_array_c(int32_t *arr, int size, int bit); ++#define av1_round_shift_array av1_round_shift_array_c ++ ++int av1_selfguided_restoration_c(const uint8_t *dgd8, int width, int height, ++ int dgd_stride, int32_t *flt0, int32_t *flt1, int flt_stride, ++ int sgr_params_idx, int bit_depth, int highbd); ++#define av1_selfguided_restoration av1_selfguided_restoration_c ++ ++void av1_txb_init_levels_c(const tran_low_t *const coeff, const int width, const int height, uint8_t *const levels); ++#define av1_txb_init_levels av1_txb_init_levels_c ++ ++void av1_upsample_intra_edge_c(uint8_t *p, int sz); ++#define av1_upsample_intra_edge av1_upsample_intra_edge_c ++ ++void av1_warp_affine_c(const int32_t *mat, const uint8_t *ref, int width, int height, int stride, uint8_t *pred, int p_col, int p_row, int p_width, int p_height, int p_stride, int subsampling_x, int subsampling_y, ConvolveParams *conv_params, int16_t alpha, int16_t beta, int16_t gamma, int16_t delta); ++#define av1_warp_affine av1_warp_affine_c ++ ++void av1_wedge_compute_delta_squares_c(int16_t *d, 
const int16_t *a, const int16_t *b, int N); ++#define av1_wedge_compute_delta_squares av1_wedge_compute_delta_squares_c ++ ++int8_t av1_wedge_sign_from_residuals_c(const int16_t *ds, const uint8_t *m, int N, int64_t limit); ++#define av1_wedge_sign_from_residuals av1_wedge_sign_from_residuals_c ++ ++uint64_t av1_wedge_sse_from_residuals_c(const int16_t *r1, const int16_t *d, const uint8_t *m, int N); ++#define av1_wedge_sse_from_residuals av1_wedge_sse_from_residuals_c ++ ++void av1_wiener_convolve_add_src_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, const WienerConvolveParams *conv_params); ++#define av1_wiener_convolve_add_src av1_wiener_convolve_add_src_c ++ ++void cdef_copy_rect8_16bit_to_16bit_c(uint16_t *dst, int dstride, const uint16_t *src, int sstride, int width, int height); ++#define cdef_copy_rect8_16bit_to_16bit cdef_copy_rect8_16bit_to_16bit_c ++ ++void cdef_copy_rect8_8bit_to_16bit_c(uint16_t *dst, int dstride, const uint8_t *src, int sstride, int width, int height); ++#define cdef_copy_rect8_8bit_to_16bit cdef_copy_rect8_8bit_to_16bit_c ++ ++void cdef_filter_16_0_c(void *dst16, int dstride, const uint16_t *in, int pri_strength, int sec_strength, int dir, int pri_damping, int sec_damping, int coeff_shift, int block_width, int block_height); ++#define cdef_filter_16_0 cdef_filter_16_0_c ++ ++void cdef_filter_16_1_c(void *dst16, int dstride, const uint16_t *in, int pri_strength, int sec_strength, int dir, int pri_damping, int sec_damping, int coeff_shift, int block_width, int block_height); ++#define cdef_filter_16_1 cdef_filter_16_1_c ++ ++void cdef_filter_16_2_c(void *dst16, int dstride, const uint16_t *in, int pri_strength, int sec_strength, int dir, int pri_damping, int sec_damping, int coeff_shift, int block_width, int block_height); ++#define cdef_filter_16_2 cdef_filter_16_2_c ++ ++void cdef_filter_16_3_c(void *dst16, 
int dstride, const uint16_t *in, int pri_strength, int sec_strength, int dir, int pri_damping, int sec_damping, int coeff_shift, int block_width, int block_height); ++#define cdef_filter_16_3 cdef_filter_16_3_c ++ ++void cdef_filter_8_0_c(void *dst8, int dstride, const uint16_t *in, int pri_strength, int sec_strength, int dir, int pri_damping, int sec_damping, int coeff_shift, int block_width, int block_height); ++#define cdef_filter_8_0 cdef_filter_8_0_c ++ ++void cdef_filter_8_1_c(void *dst8, int dstride, const uint16_t *in, int pri_strength, int sec_strength, int dir, int pri_damping, int sec_damping, int coeff_shift, int block_width, int block_height); ++#define cdef_filter_8_1 cdef_filter_8_1_c ++ ++void cdef_filter_8_2_c(void *dst8, int dstride, const uint16_t *in, int pri_strength, int sec_strength, int dir, int pri_damping, int sec_damping, int coeff_shift, int block_width, int block_height); ++#define cdef_filter_8_2 cdef_filter_8_2_c ++ ++void cdef_filter_8_3_c(void *dst8, int dstride, const uint16_t *in, int pri_strength, int sec_strength, int dir, int pri_damping, int sec_damping, int coeff_shift, int block_width, int block_height); ++#define cdef_filter_8_3 cdef_filter_8_3_c ++ ++int cdef_find_dir_c(const uint16_t *img, int stride, int32_t *var, int coeff_shift); ++#define cdef_find_dir cdef_find_dir_c ++ ++void cdef_find_dir_dual_c(const uint16_t *img1, const uint16_t *img2, int stride, int32_t *var1, int32_t *var2, int coeff_shift, int *out1, int *out2); ++#define cdef_find_dir_dual cdef_find_dir_dual_c ++ ++cfl_subsample_lbd_fn cfl_get_luma_subsampling_420_lbd_c(TX_SIZE tx_size); ++#define cfl_get_luma_subsampling_420_lbd cfl_get_luma_subsampling_420_lbd_c ++ ++cfl_subsample_lbd_fn cfl_get_luma_subsampling_422_lbd_c(TX_SIZE tx_size); ++#define cfl_get_luma_subsampling_422_lbd cfl_get_luma_subsampling_422_lbd_c ++ ++cfl_subsample_lbd_fn cfl_get_luma_subsampling_444_lbd_c(TX_SIZE tx_size); ++#define cfl_get_luma_subsampling_444_lbd 
cfl_get_luma_subsampling_444_lbd_c ++ ++cfl_predict_lbd_fn cfl_get_predict_lbd_fn_c(TX_SIZE tx_size); ++#define cfl_get_predict_lbd_fn cfl_get_predict_lbd_fn_c ++ ++cfl_subtract_average_fn cfl_get_subtract_average_fn_c(TX_SIZE tx_size); ++cfl_subtract_average_fn cfl_get_subtract_average_fn_vsx(TX_SIZE tx_size); ++#define cfl_get_subtract_average_fn cfl_get_subtract_average_fn_vsx ++ ++void av1_rtcd(void); ++ ++#include "config/aom_config.h" ++ ++#ifdef RTCD_C ++#include "aom_ports/ppc.h" ++static void setup_rtcd_internal(void) ++{ ++ int flags = ppc_simd_caps(); ++ ++ (void)flags; ++ ++} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif diff --git a/0001-Fix-highway-ppc-hwcap.patch b/0001-Fix-highway-ppc-hwcap.patch new file mode 100644 index 0000000..74c6892 --- /dev/null +++ b/0001-Fix-highway-ppc-hwcap.patch @@ -0,0 +1,14 @@ +Index: chromium-120.0.6099.71/third_party/highway/BUILD.gn +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/highway/BUILD.gn ++++ chromium-120.0.6099.71/third_party/highway/BUILD.gn +@@ -13,6 +13,9 @@ config("libhwy_external_config") { + # explicitly disabling AVX2 and AVX3 targets. 
+ defines += [ "HWY_BROKEN_TARGETS=(HWY_AVX2|HWY_AVX3)" ] + } ++ if (target_cpu == "ppc64") { ++ defines += [ "TOOLCHAIN_MISS_ASM_HWCAP_H" ] ++ } + } + + source_set("libhwy") { diff --git a/0001-Fix-libdav1d-compilation-on-clang-ppc.patch b/0001-Fix-libdav1d-compilation-on-clang-ppc.patch new file mode 100644 index 0000000..3c78d97 --- /dev/null +++ b/0001-Fix-libdav1d-compilation-on-clang-ppc.patch @@ -0,0 +1,33 @@ +From e14024659e0fc2af3df6ec56ce39a8e93b75722d Mon Sep 17 00:00:00 2001 +From: Colin Samples +Date: Sun, 8 Dec 2019 19:25:02 -0500 +Subject: [PATCH] Fix libdav1d compilation on clang ppc + +--- + src/ppc/dav1d_types.h | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +Index: chromium-120.0.6099.71/third_party/dav1d/libdav1d/src/ppc/dav1d_types.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/dav1d/libdav1d/src/ppc/dav1d_types.h ++++ chromium-120.0.6099.71/third_party/dav1d/libdav1d/src/ppc/dav1d_types.h +@@ -51,4 +51,19 @@ + #define u16l_to_i32(v) ((i32x4) vec_mergel((u16x8) v, vec_splat_u16(0))) + #define i16l_to_i32(v) ((i32x4) vec_unpackl((i16x8)v)) + ++#if defined(__clang__) ++#undef vec_splats ++#define vec_splats(N) \ ++ _Generic((N), \ ++ unsigned char: ((u8x16)(N)), \ ++ signed char: ((i8x16)(N)), \ ++ unsigned short: ((u16x8)(N)), \ ++ signed short: ((i16x8)(N)), \ ++ unsigned int: ((u32x4)(N)), \ ++ signed int: ((i32x4)(N)), \ ++ unsigned long long: ((u64x2)(N)), \ ++ signed long long: ((i64x2)(N)) \ ++ ) ++#endif ++ + #endif /* DAV1D_SRC_PPC_TYPES_H */ diff --git a/0001-Force-baseline-POWER8-AltiVec-VSX-CPU-features-when-.patch b/0001-Force-baseline-POWER8-AltiVec-VSX-CPU-features-when-.patch new file mode 100644 index 0000000..100ed7c --- /dev/null +++ b/0001-Force-baseline-POWER8-AltiVec-VSX-CPU-features-when-.patch @@ -0,0 +1,27 @@ +From ea104a841fca1ff4d5430915f1b7c52c6a642f13 Mon Sep 17 00:00:00 2001 +From: Timothy Pearson +Date: Fri, 21 Sep 2018 21:44:17 -0500 
+Subject: [PATCH] Force baseline POWER8 / AltiVec / VSX CPU features when on a + PPC64 platform in LE mode + +--- + BUILD.gn | 6 ++++++ + 1 file changed, 6 insertions(+) + +Index: chromium-120.0.6099.71/v8/BUILD.gn +=================================================================== +--- chromium-120.0.6099.71.orig/v8/BUILD.gn ++++ chromium-120.0.6099.71/v8/BUILD.gn +@@ -1340,6 +1340,12 @@ config("toolchain") { + } + if (host_byteorder == "little") { + defines += [ "V8_TARGET_ARCH_PPC_LE" ] ++ cflags += [ ++ # Enable usage of AltiVec, VSX, and other POWER8 and higher features ++ "-mcpu=power8", ++ "-maltivec", ++ "-mvsx", ++ ] + } else if (host_byteorder == "big") { + defines += [ "V8_TARGET_ARCH_PPC_BE" ] + if (current_os == "aix") { diff --git a/0001-Implement-support-for-PPC64-on-Linux.patch b/0001-Implement-support-for-PPC64-on-Linux.patch new file mode 100644 index 0000000..3544105 --- /dev/null +++ b/0001-Implement-support-for-PPC64-on-Linux.patch @@ -0,0 +1,1481 @@ +From 8c24c695052d156fd1322d6dacfab117b92cb175 Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Thu, 30 Aug 2018 17:32:05 -0500 +Subject: [PATCH] Implement support for PPC64 on Linux + +This patch implements support for the PPC64 architecture on Linux hosts. 
+--- + CONTRIBUTORS | 1 + + minidump/minidump_context.h | 64 ++++++ + minidump/minidump_context_writer.cc | 50 +++++ + minidump/minidump_context_writer.h | 39 ++++ + minidump/minidump_context_writer_test.cc | 15 ++ + minidump/minidump_misc_info_writer.cc | 2 + + minidump/test/minidump_context_test_util.cc | 67 ++++++ + minidump/test/minidump_context_test_util.h | 3 + + snapshot/capture_memory.cc | 5 + + snapshot/cpu_architecture.h | 5 +- + snapshot/cpu_context.cc | 5 + + snapshot/cpu_context.h | 19 ++ + snapshot/linux/cpu_context_linux.h | 73 ++++++ + snapshot/linux/debug_rendezvous_test.cc | 4 +- + snapshot/linux/exception_snapshot_linux.cc | 63 ++++++ + snapshot/linux/exception_snapshot_linux.h | 2 + + .../linux/exception_snapshot_linux_test.cc | 21 ++ + snapshot/linux/process_reader_linux.cc | 2 + + snapshot/linux/signal_context.h | 83 +++++++ + snapshot/linux/system_snapshot_linux.cc | 11 + + snapshot/linux/thread_snapshot_linux.cc | 8 + + snapshot/linux/thread_snapshot_linux.h | 2 + + snapshot/test/test_cpu_context.cc | 33 +++ + snapshot/test/test_cpu_context.h | 1 + + test/linux/get_tls.cc | 2 + + test/multiprocess_posix.cc | 3 +- + util/linux/auxiliary_vector.cc | 5 + + util/linux/ptracer.cc | 61 +++++ + util/linux/thread_info.h | 55 +++++ + util/misc/capture_context.h | 1 + + util/misc/capture_context_linux.S | 212 +++++++++++++++++- + util/misc/capture_context_test.cc | 3 +- + util/misc/capture_context_test_util_linux.cc | 6 + + 36 files changed, 932 insertions(+), 12 deletions(-) + +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/CONTRIBUTORS +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/CONTRIBUTORS ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/CONTRIBUTORS +@@ -13,3 +13,5 @@ Mark Mentovai + Robert Sesek + Scott Graham + Joshua Peraza ++Shawn Anastasio ++Timothy Pearson +Index: 
chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/minidump_context.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/minidump/minidump_context.h ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/minidump_context.h +@@ -687,6 +687,70 @@ struct MinidumpContextRISCV64 { + uint32_t fcsr; + }; + ++//! \brief ppc64-specific flags for MinidumpPPC64::context_flags ++//! Based on minidump_cpu_ppc64.h from breakpad ++enum MinidumpContextPPC64Flags : uint32_t { ++ //! \brief Identifies the context as PPC64. ++ kMinidumpContextPPC64 = 0x01000000, ++ ++ //! \brief Indicates the validity of general purpose registers. ++ //! ++ //! Registers `r0`-`r31`, `nip`, `msr`, `lr`, etc. are valid. ++ kMinidumpContextPPC64Base = kMinidumpContextPPC64 | 0x00000001, ++ ++ //! \brief Indicates the validity of floating point registers. ++ //! ++ //! Registers `fp0`-`fp31`, `fpscr` are valid. ++ kMinidumpContextPPC64Floating = kMinidumpContextPPC64 | 0x00000008, ++ ++ //! \brief Indicates the validity of Altivec/VMX registers. ++ //! ++ //! Registers `v0`-`v31`, `vscr`, `vrsave`. ++ kMinidumpContextPPC64Vector = kMinidumpContextPPC64 | 0x00000020, ++ ++ //! \brief Indicates the validity of all registers ++ kMinidumpContextPPC64All = kMinidumpContextPPC64Base | ++ kMinidumpContextPPC64Floating | ++ kMinidumpContextPPC64Vector ++}; ++ ++//! \brief A PPC64 CPU context carried in a minidump file. ++//! Based on minidump_cpu_ppc64.h from breakpad. ++struct MinidumpContextPPC64 { ++ uint64_t context_flags; ++ ++ //! \brief General purpose registers. ++ uint64_t nip; ++ uint64_t msr; ++ uint64_t regs[32]; ++ uint64_t ccr; ++ uint64_t xer; ++ uint64_t lnk; ++ uint64_t ctr; ++ ++ //! \brief Floating point registers. ++ double fpregs[32]; ++ ++ //! \brief FPU status register. ++ double fpscr; ++ ++ //! \brief Altivec/VMX vector registers. ++ struct { ++ //! \brief Vector registers are 128bits. 
++  uint128_struct save_vr[32];
++  uint128_struct save_vscr;
++
++  //! \brief Padding included for breakpad compatibility.
++  uint32_t save_pad5[4];
++
++  //! \brief VRSAVE register.
++  uint32_t save_vrsave;
++
++  //! \brief Padding included for breakpad compatibility.
++  uint32_t save_pad6[7];
++ } vregs;
++};
++
+ } // namespace crashpad
+
+ #endif // CRASHPAD_MINIDUMP_MINIDUMP_CONTEXT_H_
+Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/minidump_context_writer.cc
+===================================================================
+--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/minidump/minidump_context_writer.cc
++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/minidump_context_writer.cc
+@@ -110,6 +110,13 @@ MinidumpContextWriter::CreateFromSnapsho
+       break;
+     }
+
++    case kCPUArchitecturePPC64: {
++      context = std::make_unique<MinidumpContextPPC64Writer>();
++      reinterpret_cast<MinidumpContextPPC64Writer*>(context.get())
++          ->InitializeFromSnapshot(context_snapshot->ppc64);
++      break;
++    }
++
+     default: {
+       LOG(ERROR) << "unknown context architecture "
+                  << context_snapshot->architecture;
+@@ -601,5 +608,48 @@ size_t MinidumpContextRISCV64Writer::Con
+   DCHECK_GE(state(), kStateFrozen);
+   return sizeof(context_);
+ }
++
++MinidumpContextPPC64Writer::MinidumpContextPPC64Writer()
++    : MinidumpContextWriter(), context_() {
++  context_.context_flags = kMinidumpContextPPC64;
++}
++
++MinidumpContextPPC64Writer::~MinidumpContextPPC64Writer() = default;
++
++void MinidumpContextPPC64Writer::InitializeFromSnapshot(
++    const CPUContextPPC64* context_snapshot) {
++  DCHECK_EQ(state(), kStateMutable);
++  DCHECK_EQ(context_.context_flags, kMinidumpContextPPC64);
++
++  context_.context_flags = kMinidumpContextPPC64All;
++
++  memcpy(context_.regs, context_snapshot->regs, sizeof(context_.regs));
++  context_.nip = context_snapshot->nip;
++  context_.msr = context_snapshot->msr;
++  context_.ccr = context_snapshot->ccr;
++  context_.xer = context_snapshot->xer;
++  context_.lnk =
context_snapshot->lnk; ++ context_.ctr = context_snapshot->ctr; ++ ++ memcpy(context_.fpregs, context_snapshot->fpregs, sizeof(context_.fpregs)); ++ context_.fpscr = context_snapshot->fpscr; ++ ++ memcpy(context_.vregs.save_vr, context_snapshot->vregs.save_vr, ++ sizeof(context_.vregs.save_vr)); ++ memcpy(&context_.vregs.save_vscr, &context_snapshot->vregs.save_vscr, ++ sizeof(context_.vregs.save_vscr)); ++ context_.vregs.save_vrsave = context_snapshot->vregs.save_vrsave; ++} ++ ++bool MinidumpContextPPC64Writer::WriteObject( ++ FileWriterInterface* file_writer) { ++ DCHECK_EQ(state(), kStateWritable); ++ return file_writer->Write(&context_, sizeof(context_)); ++} ++ ++size_t MinidumpContextPPC64Writer::ContextSize() const { ++ DCHECK_GE(state(), kStateFrozen); ++ return sizeof(context_); ++} + + } // namespace crashpad +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/minidump_context_writer.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/minidump/minidump_context_writer.h ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/minidump_context_writer.h +@@ -413,6 +413,49 @@ class MinidumpContextRISCV64Writer final + MinidumpContextRISCV64 context_; + }; + ++//! \brief The writer for a MinidumpContextPPC64 structure in a minidump file. ++class MinidumpContextPPC64Writer final : public MinidumpContextWriter { ++ public: ++ MinidumpContextPPC64Writer(); ++ ++ MinidumpContextPPC64Writer(const MinidumpContextPPC64Writer&) = delete; ++ MinidumpContextPPC64Writer& operator=(const MinidumpContextPPC64Writer&) = ++ delete; ++ ++ ~MinidumpContextPPC64Writer() override; ++ ++ //! \brief Initializes the MinidumpContextPPC based on \a context_snapshot. ++ //! ++ //! \param[in] context_snapshot The context snapshot to use as source data. ++ //! ++ //! \note Valid in #kStateMutable. No mutation of context() may be done before ++ //! 
calling this method, and it is not normally necessary to alter ++ //! context() after calling this method. ++ void InitializeFromSnapshot(const CPUContextPPC64* context_snapshot); ++ ++ //! \brief Returns a pointer to the context structure that this object will ++ //! write. ++ //! ++ //! \attention This returns a non-`const` pointer to this object’s private ++ //! data so that a caller can populate the context structure directly. ++ //! This is done because providing setter interfaces to each field in the ++ //! context structure would be unwieldy and cumbersome. Care must be taken ++ //! to populate the context structure correctly. The context structure ++ //! must only be modified while this object is in the #kStateMutable ++ //! state. ++ MinidumpContextPPC64* context() { return &context_; } ++ ++ protected: ++ // MinidumpWritable: ++ bool WriteObject(FileWriterInterface* file_writer) override; ++ ++ // MinidumpContextWriter: ++ size_t ContextSize() const override; ++ ++ private: ++ MinidumpContextPPC64 context_; ++}; ++ + } // namespace crashpad + + #endif // CRASHPAD_MINIDUMP_MINIDUMP_CONTEXT_WRITER_H_ +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/minidump_context_writer_test.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/minidump/minidump_context_writer_test.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/minidump_context_writer_test.cc +@@ -328,6 +328,21 @@ TYPED_TEST(MinidumpContextWriter, RISCV6 + TypeParam>(context, ExpectMinidumpContextRISCV64, kSeed); + } + ++TEST(MinidumpContextWriter, PPC64_Zeros) { ++ EmptyContextTest( ++ ExpectMinidumpContextPPC64); ++} ++ ++TEST(MinidumpContextWriter, PPC64_FromSnapshot) { ++ constexpr uint32_t kSeed = 64; ++ CPUContextPPC64 context_ppc64; ++ CPUContext context; ++ context.ppc64 = &context_ppc64; ++ InitializeCPUContextPPC64(&context, kSeed); ++ FromSnapshotTest( ++ context, 
ExpectMinidumpContextPPC64, kSeed); ++} ++ + } // namespace + } // namespace test + } // namespace crashpad +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/minidump_misc_info_writer.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/minidump/minidump_misc_info_writer.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/minidump_misc_info_writer.cc +@@ -177,6 +177,8 @@ std::string MinidumpMiscInfoDebugBuildSt + static constexpr char kCPU[] = "mips64"; + #elif defined(ARCH_CPU_RISCV64) + static constexpr char kCPU[] = "riscv64"; ++#elif defined(ARCH_CPU_PPC64) ++ static constexpr char kCPU[] = "ppc64"; + #else + #error define kCPU for this CPU + #endif +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/test/minidump_context_test_util.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/minidump/test/minidump_context_test_util.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/test/minidump_context_test_util.cc +@@ -297,6 +297,40 @@ void InitializeMinidumpContextRISCV64(Mi + context->fcsr = value++; + } + ++void InitializeMinidumpContextPPC64(MinidumpContextPPC64* context, ++ uint32_t seed) { ++ if (seed == 0) { ++ memset(context, 0, sizeof(*context)); ++ context->context_flags = kMinidumpContextPPC64; ++ return; ++ } ++ ++ context->context_flags = kMinidumpContextPPC64All; ++ ++ uint64_t value = seed; ++ for (size_t i = 0; i < base::size(context->regs); ++i) { ++ context->regs[i] = value++; ++ } ++ ++ context->nip = value++; ++ context->msr = value++; ++ context->ccr = value++; ++ context->xer = value++; ++ context->lnk = value++; ++ context->ctr = value++; ++ ++ for (size_t i = 0; i < base::size(context->fpregs); ++i) { ++ context->fpregs[i] = static_cast(i); ++ } ++ context->fpscr = value++; ++ ++ for (size_t i = 0; i < 
base::size(context->vregs.save_vr); ++i) { ++ context->vregs.save_vr[i] = {value++, value++}; ++ } ++ context->vregs.save_vscr = {value++, value++}; ++ context->vregs.save_vrsave = value++; ++} ++ + namespace { + + // Using Google Test assertions, compares |expected| to |observed|. This is +@@ -645,5 +679,38 @@ void ExpectMinidumpContextRISCV64(uint32 + EXPECT_EQ(observed->fcsr, expected.fcsr); + } + ++void ExpectMinidumpContextPPC64(uint32_t expect_seed, ++ const MinidumpContextPPC64* observed, ++ bool snapshot) { ++ MinidumpContextPPC64 expected; ++ InitializeMinidumpContextPPC64(&expected, expect_seed); ++ ++ EXPECT_EQ(observed->context_flags, expected.context_flags); ++ ++ for (size_t i = 0; i < base::size(expected.regs); ++i) { ++ EXPECT_EQ(observed->regs[i], expected.regs[i]); ++ } ++ ++ EXPECT_EQ(observed->nip, expected.nip); ++ EXPECT_EQ(observed->msr, expected.msr); ++ EXPECT_EQ(observed->ccr, expected.ccr); ++ EXPECT_EQ(observed->xer, expected.xer); ++ EXPECT_EQ(observed->lnk, expected.lnk); ++ EXPECT_EQ(observed->ctr, expected.ctr); ++ ++ for (size_t i = 0; i < base::size(expected.fpregs); ++i) { ++ EXPECT_EQ(observed->fpregs[i], expected.fpregs[i]); ++ } ++ EXPECT_EQ(observed->fpscr, expected.fpscr); ++ ++ for (size_t i = 0; i < base::size(expected.vregs.save_vr); ++ i) { ++ EXPECT_EQ(observed->vregs.save_vr[i].lo, expected.vregs.save_vr[i].lo); ++ EXPECT_EQ(observed->vregs.save_vr[i].hi, expected.vregs.save_vr[i].hi); ++ } ++ EXPECT_EQ(observed->vregs.save_vscr.lo, expected.vregs.save_vscr.lo); ++ EXPECT_EQ(observed->vregs.save_vscr.hi, expected.vregs.save_vscr.hi); ++ EXPECT_EQ(observed->vregs.save_vrsave, expected.vregs.save_vrsave); ++} ++ + } // namespace test + } // namespace crashpad +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/test/minidump_context_test_util.h +=================================================================== +--- 
chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/minidump/test/minidump_context_test_util.h ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/minidump/test/minidump_context_test_util.h +@@ -90,6 +90,9 @@ void ExpectMinidumpContextMIPS64(uint32_ + void ExpectMinidumpContextRISCV64(uint32_t expect_seed, + const MinidumpContextRISCV64* observed, + bool snapshot); ++void ExpectMinidumpContextPPC64(uint32_t expect_seed, ++ const MinidumpContextPPC64* observed, ++ bool snapshot); + //! \} + + } // namespace test +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/capture_memory.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/capture_memory.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/capture_memory.cc +@@ -122,6 +122,11 @@ void CaptureMemory::PointedToByContext(c + for (size_t i = 0; i < std::size(context.riscv64->regs); ++i) { + MaybeCaptureMemoryAround(delegate, context.riscv64->regs[i]); + } ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ MaybeCaptureMemoryAround(delegate, context.ppc64->nip); ++ for (size_t i = 0; i < std::size(context.ppc64->regs); ++i) { ++ MaybeCaptureMemoryAround(delegate, context.ppc64->regs[i]); ++ } + #else + #error Port. + #endif +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/cpu_architecture.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/cpu_architecture.h ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/cpu_architecture.h +@@ -47,6 +47,9 @@ enum CPUArchitecture { + + //! \brief 64-bit RISC-V. + kCPUArchitectureRISCV64, ++ ++ //! \brief 64-bit PPC64. 
++ kCPUArchitecturePPC64 + }; + + } // namespace crashpad +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/cpu_context.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/cpu_context.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/cpu_context.cc +@@ -173,6 +173,8 @@ uint64_t CPUContext::InstructionPointer( + return arm64->pc; + case kCPUArchitectureRISCV64: + return riscv64->pc; ++ case kCPUArchitecturePPC64: ++ return ppc64->nip; + default: + NOTREACHED(); + return ~0ull; +@@ -191,6 +193,8 @@ uint64_t CPUContext::StackPointer() cons + return arm64->sp; + case kCPUArchitectureRISCV64: + return riscv64->regs[1]; ++ case kCPUArchitecturePPC64: ++ return ppc64->regs[1]; + default: + NOTREACHED(); + return ~0ull; +@@ -231,6 +235,7 @@ bool CPUContext::Is64Bit() const { + case kCPUArchitectureX86_64: + case kCPUArchitectureARM64: + case kCPUArchitectureMIPS64EL: ++ case kCPUArchitecturePPC64: + case kCPUArchitectureRISCV64: + return true; + case kCPUArchitectureX86: +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/cpu_context.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/cpu_context.h ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/cpu_context.h +@@ -371,6 +371,24 @@ struct CPUContextRISCV64 { + uint32_t fcsr; + }; + ++//! \brief A context structure carrying PPC64 CPU state. ++struct CPUContextPPC64 { ++ uint64_t nip; ++ uint64_t msr; ++ uint64_t regs[32]; ++ uint64_t ccr; ++ uint64_t xer; ++ uint64_t lnk; ++ uint64_t ctr; ++ double fpregs[32]; ++ double fpscr; ++ struct { ++ uint128_struct save_vr[32]; ++ uint128_struct save_vscr; ++ uint32_t save_vrsave; ++ } vregs; ++}; ++ + //! \brief A context structure capable of carrying the context of any supported + //! CPU architecture. 
+ struct CPUContext { +@@ -412,6 +430,7 @@ struct CPUContext { + CPUContextMIPS* mipsel; + CPUContextMIPS64* mips64; + CPUContextRISCV64* riscv64; ++ CPUContextPPC64* ppc64; + }; + }; + +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/cpu_context_linux.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/linux/cpu_context_linux.h ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/cpu_context_linux.h +@@ -15,6 +15,7 @@ + #ifndef CRASHPAD_SNAPSHOT_LINUX_CPU_CONTEXT_LINUX_H_ + #define CRASHPAD_SNAPSHOT_LINUX_CPU_CONTEXT_LINUX_H_ + ++#include + #include "build/build_config.h" + #include "snapshot/cpu_context.h" + #include "snapshot/linux/signal_context.h" +@@ -188,6 +189,78 @@ void InitializeCPUContextRISCV64(const T + + #endif // ARCH_CPU_RISCV64 || DOXYGEN + ++#if defined(ARCH_CPU_PPC64_FAMILY) || DOXYGEN ++ ++//! \brief Initializes a CPUContextPPC64 structure from native context ++//! structures on Linux. ++//! ++//! \param[in] thread_context The native thread context. ++//! \param[in] float_context The native float context. ++//! \param[in] vector_context The native vector context. ++//! \param[out] context The CPUContextPPC64 structure to initialize. 
++template ++void InitializeCPUContextPPC64( ++ const ThreadContext::t64_t& thread_context, ++ const FloatContext::f64_t& float_context, ++ const VectorContext::v64_t& vector_context, ++ typename Traits::CPUContext* context) { ++ ++ memcpy(context->regs, thread_context.gpr, sizeof(context->regs)); ++ context->nip = thread_context.nip; ++ context->msr = thread_context.msr; ++ context->ccr = thread_context.ccr; ++ context->xer = thread_context.xer; ++ context->lnk = thread_context.lnk; ++ context->ctr = thread_context.ctr; ++ ++ memcpy(context->fpregs, float_context.fpregs, sizeof(context->fpregs)); ++ context->fpscr = float_context.fpscr; ++ ++ for (uint8_t i = 0; i < 32; i++) { ++ context->vregs.save_vr[i] = { ++ (((uint64_t)vector_context.vrregs[i][0]) << 32) | ++ vector_context.vrregs[i][1], ++ (((uint64_t)vector_context.vrregs[i][2]) << 32) | ++ vector_context.vrregs[i][3] ++ }; ++ } ++ context->vregs.save_vrsave = vector_context.vrsave; ++ context->vregs.save_vscr = {0, (uint64_t)vector_context.vscr.vscr_word}; ++} ++ ++template ++void InitializeCPUContextPPC64( ++ const SignalThreadContext64 &thread_context, ++ const SignalFloatContext64 &float_context, ++ const SignalVectorContext64 &vector_context, ++ typename Traits::CPUContext* context) { ++ ++ memcpy(context->regs, thread_context.regs, sizeof(context->regs)); ++ context->nip = thread_context.nip; ++ context->msr = thread_context.msr; ++ context->ccr = thread_context.ccr; ++ context->xer = thread_context.xer; ++ context->lnk = thread_context.lnk; ++ context->ctr = thread_context.ctr; ++ ++ memcpy(context->fpregs, float_context.regs, sizeof(context->fpregs)); ++ context->fpscr = float_context.fpscr; ++ ++ for (uint8_t i = 0; i < 32; i++) { ++ context->vregs.save_vr[i] = { ++ (((uint64_t)vector_context.vrregs[i][0]) << 32) | ++ vector_context.vrregs[i][1], ++ (((uint64_t)vector_context.vrregs[i][2]) << 32) | ++ vector_context.vrregs[i][3] ++ }; ++ } ++ context->vregs.save_vrsave = vector_context.vrsave; ++ 
context->vregs.save_vscr = {0, (uint64_t)vector_context.vscr.vscr_word};
++}
++
++
++#endif
++
+ } // namespace internal
+ } // namespace crashpad
+
+Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/debug_rendezvous_test.cc
+===================================================================
+--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/linux/debug_rendezvous_test.cc
++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/debug_rendezvous_test.cc
+@@ -195,12 +195,15 @@ void TestAgainstTarget(PtraceConnection*
+             device == 0 && inode == 0 && mapping_name == "[vdso]";
+ #if defined(ARCH_CPU_X86)
+         static constexpr char kPrefix[] = "linux-gate.so.";
++        static constexpr char kPrefix64[] = "linux-gate.so.";
+ #else
+         static constexpr char kPrefix[] = "linux-vdso.so.";
++        static constexpr char kPrefix64[] = "linux-vdso64.so.";
+ #endif
+         return is_vdso_mapping ==
+                (module_name.empty() ||
+-                module_name.compare(0, strlen(kPrefix), kPrefix) == 0);
++                module_name.compare(0, strlen(kPrefix), kPrefix) == 0 ||
++                module_name.compare(0, strlen(kPrefix64), kPrefix64) == 0);
+       },
+       module_mapping->name,
+       module_mapping->device,
+Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.cc
+===================================================================
+--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.cc
++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.cc
+@@ -367,6 +367,69 @@ bool ExceptionSnapshotLinux::ReadContext
+   return internal::ReadContext(reader, context_address, context_.riscv64);
+ }
+
++#elif defined(ARCH_CPU_PPC64_FAMILY)
++
++template <typename Traits>
++static bool ReadContext(ProcessReaderLinux* reader,
++                        LinuxVMAddress context_address,
++                        typename Traits::CPUContext* dest_context) {
++  const ProcessMemory* memory = reader->Memory();
++
++  LinuxVMAddress
gp_regs_address = context_address + ++ offsetof(UContext, mcontext) + ++ offsetof(typename Traits::MContext, gp_regs); ++ ++ typename Traits::SignalThreadContext thread_context; ++ if (!memory->Read(gp_regs_address, sizeof(thread_context), &thread_context)) { ++ LOG(ERROR) << "Couldn't read gp_regs!"; ++ return false; ++ } ++ ++ LinuxVMAddress fp_regs_address = context_address + ++ offsetof(UContext, mcontext) + ++ offsetof(typename Traits::MContext, fp_regs); ++ ++ typename Traits::SignalFloatContext fp_context; ++ if (!memory->Read(fp_regs_address, sizeof(fp_context), &fp_context)) { ++ LOG(ERROR) << "Couldn't read fp_regs!"; ++ return false; ++ } ++ ++ LinuxVMAddress v_regs_ptr_address = context_address + ++ offsetof(UContext, mcontext) + ++ offsetof(typename Traits::MContext, vmx_reserve) + 8; ++ ++ typename Traits::SignalVectorContext v_context; ++ if (!memory->Read(v_regs_ptr_address, sizeof(v_context), &v_context)) { ++ LOG(ERROR) << "Couldn't read v_regs!"; ++ return false; ++ } ++ ++ InitializeCPUContextPPC64(thread_context, fp_context, ++ v_context, dest_context); ++ ++ return true; ++} ++ ++template<> ++bool ExceptionSnapshotLinux::ReadContext( ++ ProcessReaderLinux* reader, ++ LinuxVMAddress context_address) { ++ context_.architecture = kCPUArchitecturePPC64; ++ context_.ppc64 = &context_union_.ppc64; ++ ++ return internal::ReadContext( ++ reader, context_address, context_.ppc64); ++} ++ ++template<> ++bool ExceptionSnapshotLinux::ReadContext( ++ ProcessReaderLinux* reader, ++ LinuxVMAddress context_address) { ++ // PPC64 is 64-bit ++ return false; ++} ++ + #endif // ARCH_CPU_X86_FAMILY + + bool ExceptionSnapshotLinux::Initialize( +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.h ++++ 
chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux.h +@@ -91,6 +91,8 @@ class ExceptionSnapshotLinux final : pub + CPUContextMIPS64 mips64; + #elif defined(ARCH_CPU_RISCV64) + CPUContextRISCV64 riscv64; ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ CPUContextPPC64 ppc64; + #endif + } context_union_; + CPUContext context_; +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux_test.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux_test.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/exception_snapshot_linux_test.cc +@@ -325,7 +325,28 @@ void ExpectContext(const CPUContext& act + sizeof(actual.riscv64->fpregs)), + 0); + } ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++using NativeCPUContext = ucontext_t; + ++void InitializeContext(NativeCPUContext* context) { ++ for (size_t reg = 0; reg < 32; ++reg) { ++ context->uc_mcontext.gp_regs[reg] = reg; ++ } ++ ++ memset(&context->uc_mcontext.fp_regs, 44, ++ sizeof(context->uc_mcontext.fp_regs)); ++} ++ ++void ExpectContext(const CPUContext& actual, const NativeCPUContext& expected) { ++ EXPECT_EQ(actual.architecture, kCPUArchitecturePPC64); ++ ++ for (size_t reg = 0; reg < 32; ++reg) { ++ EXPECT_EQ(actual.ppc64->regs[reg], expected.uc_mcontext.gp_regs[reg]); ++ } ++ ++ EXPECT_EQ(memcmp(actual.ppc64->fpregs, expected.uc_mcontext.fp_regs, ++ sizeof(actual.ppc64->fpregs)), 0); ++} + #else + #error Port. 
+ #endif +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/process_reader_linux.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/linux/process_reader_linux.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/process_reader_linux.cc +@@ -129,6 +129,8 @@ void ProcessReaderLinux::Thread::Initial + : thread_info.thread_context.t32.regs[29]; + #elif defined(ARCH_CPU_RISCV64) + stack_pointer = thread_info.thread_context.t64.regs[1]; ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ stack_pointer = thread_info.thread_context.t64.gpr[1]; + #else + #error Port. + #endif +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/signal_context.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/linux/signal_context.h ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/signal_context.h +@@ -456,6 +456,89 @@ static_assert(offsetof(UContext stack; ++ Sigset sigmask; ++ MContext64 mcontext; ++}; ++#pragma pack(push, 1) ++ ++static_assert(sizeof(UContext) == sizeof(ucontext_t), ++ "ucontext_t size mismatch"); ++static_assert(sizeof(MContext64) == sizeof(mcontext_t), ++ "mcontext_t size mismatch"); ++static_assert(sizeof(SignalThreadContext64) == sizeof(gregset_t), ++ "gregset_t size mismatch"); ++static_assert(sizeof(SignalFloatContext64) == sizeof(fpregset_t), ++ "fpregset_t size mismatch"); ++static_assert(sizeof(SignalVectorContext64) == sizeof(_libc_vrstate), ++ "vrstate size mismatch"); ++static_assert(offsetof(UContext, mcontext) == ++ offsetof(ucontext_t, uc_mcontext), "mcontext offset mismatch"); ++static_assert(offsetof(MContext64, gp_regs) == ++ offsetof(mcontext_t, gp_regs), "gp_regs offset mismatch"); + #else + #error Port. 
+ #endif // ARCH_CPU_X86_FAMILY +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/system_snapshot_linux.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/linux/system_snapshot_linux.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/system_snapshot_linux.cc +@@ -208,6 +208,8 @@ CPUArchitecture SystemSnapshotLinux::Get + : kCPUArchitectureMIPSEL; + #elif defined(ARCH_CPU_RISCV64) + return kCPUArchitectureRISCV64; ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ return kCPUArchitecturePPC64; + #else + #error port to your architecture + #endif +@@ -226,6 +228,9 @@ uint32_t SystemSnapshotLinux::CPURevisio + #elif defined(ARCH_CPU_RISCV64) + // Not implemented + return 0; ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ // Not yet implemented on PPC64 ++ return 0; + #else + #error port to your architecture + #endif +@@ -249,6 +254,9 @@ std::string SystemSnapshotLinux::CPUVend + #elif defined(ARCH_CPU_RISCV64) + // Not implemented + return std::string(); ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ // Not yet implemented on PPC64 ++ return std::string(); + #else + #error port to your architecture + #endif +@@ -385,6 +393,9 @@ bool SystemSnapshotLinux::NXEnabled() co + #elif defined(ARCH_CPU_RISCV64) + // Not implemented + return false; ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ // Not yet implemented on PPC64 ++ return false; + #else + #error Port. 
+ #endif // ARCH_CPU_X86_FAMILY +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.cc +@@ -196,6 +196,14 @@ bool ThreadSnapshotLinux::Initialize( + InitializeCPUContextRISCV64(thread.thread_info.thread_context.t64, + thread.thread_info.float_context.f64, + context_.riscv64); ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ context_.architecture = kCPUArchitecturePPC64; ++ context_.ppc64 = &context_union_.ppc64; ++ InitializeCPUContextPPC64( ++ thread.thread_info.thread_context.t64, ++ thread.thread_info.float_context.f64, ++ thread.thread_info.vector_context.v64, ++ context_.ppc64); + #else + #error Port. + #endif +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.h ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/linux/thread_snapshot_linux.h +@@ -76,6 +76,8 @@ class ThreadSnapshotLinux final : public + CPUContextMIPS64 mips64; + #elif defined(ARCH_CPU_RISCV64) + CPUContextRISCV64 riscv64; ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ CPUContextPPC64 ppc64; + #else + #error Port. 
+ #endif // ARCH_CPU_X86_FAMILY +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/test/test_cpu_context.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/test/test_cpu_context.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/test/test_cpu_context.cc +@@ -317,5 +317,38 @@ void InitializeCPUContextRISCV64(CPUCont + riscv64->fcsr = value++; + } + ++void InitializeCPUContextPPC64(CPUContext* context, uint32_t seed) { ++ context->architecture = kCPUArchitecturePPC64; ++ CPUContextPPC64* ppc64 = context->ppc64; ++ ++ if (seed == 0) { ++ memset(ppc64, 0, sizeof(*ppc64)); ++ return; ++ } ++ ++ uint64_t value = seed; ++ for (size_t i = 0; i < base::size(ppc64->regs); ++i) { ++ ppc64->regs[i] = value++; ++ } ++ ++ ppc64->nip = value++; ++ ppc64->msr = value++; ++ ppc64->ccr = value++; ++ ppc64->xer = value++; ++ ppc64->lnk = value++; ++ ppc64->ctr = value++; ++ ++ for (size_t i = 0; i < base::size(ppc64->fpregs); ++i) { ++ ppc64->fpregs[i] = static_cast(i); ++ } ++ ppc64->fpscr = value++; ++ ++ for (size_t i = 0; i < base::size(ppc64->vregs.save_vr); ++i) { ++ ppc64->vregs.save_vr[i] = {value++, value++}; ++ } ++ ppc64->vregs.save_vscr = {value++, value++}; ++ ppc64->vregs.save_vrsave = value++; ++} ++ + } // namespace test + } // namespace crashpad +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/test/test_cpu_context.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/snapshot/test/test_cpu_context.h ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/snapshot/test/test_cpu_context.h +@@ -64,6 +64,7 @@ void InitializeCPUContextARM64(CPUContex + void InitializeCPUContextMIPS(CPUContext* context, uint32_t seed); + void InitializeCPUContextMIPS64(CPUContext* context, uint32_t seed); + void InitializeCPUContextRISCV64(CPUContext* 
context, uint32_t seed); ++void InitializeCPUContextPPC64(CPUContext* context, uint32_t seed); + //! \} + + } // namespace test +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/test/linux/get_tls.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/test/linux/get_tls.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/test/linux/get_tls.cc +@@ -51,6 +51,8 @@ LinuxVMAddress GetTLS() { + : "$3"); + #elif defined(ARCH_CPU_RISCV64) + asm("mv %0, tp" : "=r"(tls)); ++#elif defined(ARCH_CPU_PPC64) ++ asm("mr %0, 13": "=r"(tls)); + #else + #error Port. + #endif // ARCH_CPU_ARMEL +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/test/multiprocess_posix.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/test/multiprocess_posix.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/test/multiprocess_posix.cc +@@ -162,7 +162,8 @@ void Multiprocess::SetExpectedChildTermi + } + + void Multiprocess::SetExpectedChildTerminationBuiltinTrap() { +-#if defined(ARCH_CPU_ARM64) || defined(ARCH_CPU_MIPS_FAMILY) ++#if defined(ARCH_CPU_ARM64) || defined(ARCH_CPU_MIPS_FAMILY) || \ ++ defined(ARCH_CPU_PPC64_FAMILY) + SetExpectedChildTermination(kTerminationSignal, SIGTRAP); + #else + SetExpectedChildTermination(kTerminationSignal, SIGILL); +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/util/linux/auxiliary_vector.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/util/linux/auxiliary_vector.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/util/linux/auxiliary_vector.cc +@@ -56,6 +56,11 @@ bool AuxiliaryVector::Read(PtraceConnect + if (type == AT_IGNORE) { + continue; + } ++#if defined(ARCH_CPU_PPC64_FAMILY) ++ if (type == AT_IGNOREPPC) { ++ continue; ++ } ++#endif + if 
(!MapInsertOrReplace(&values_, type, value, nullptr)) { + LOG(ERROR) << "duplicate auxv entry"; + return false; +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/util/linux/ptracer.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/util/linux/ptracer.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/util/linux/ptracer.cc +@@ -430,6 +430,64 @@ bool GetThreadArea64(pid_t tid, + return true; + } + ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++// PPC64 has had HAVE_ARCH_TRACEHOOK set since 2.6.27 (even before x86 had it). ++// That means we can simply use PTRACE_GETREGESET. ++ ++template ++bool GetRegisterSet(pid_t tid, int set, Destination* dest, bool can_log) { ++ iovec iov; ++ iov.iov_base = reinterpret_cast(dest); ++ iov.iov_len = sizeof(*dest); ++ if (ptrace(PTRACE_GETREGSET, tid, reinterpret_cast(set), &iov) != 0) { ++ PLOG_IF(ERROR, can_log) << "ptrace"; ++ return false; ++ } ++ if (iov.iov_len != sizeof(*dest)) { ++ LOG_IF(ERROR, can_log) << "Unexpected registers size"; ++ return false; ++ } ++ return true; ++} ++ ++bool GetVectorRegisters64(pid_t tid, ++ VectorContext* context, ++ bool can_log) { ++ return GetRegisterSet(tid, NT_PPC_VMX, &context->v64, can_log); ++} ++ ++bool GetFloatingPointRegisters64(pid_t tid, ++ FloatContext* context, ++ bool can_log) { ++ return GetRegisterSet(tid, NT_PRFPREG, &context->f64, can_log); ++} ++ ++bool GetThreadArea64(pid_t tid, ++ const ThreadContext& context, ++ LinuxVMAddress* address, ++ bool can_log) { ++ // PPC64 doesn't have PTRACE_GET_THREAD_AREA since the thread pointer ++ // is stored in GPR 13. 
++ ThreadContext::t64_t tc; ++ if (!GetRegisterSet(tid, NT_PRSTATUS, &tc, can_log)) { ++ LOG_IF(ERROR, can_log) << "Unable to get thread pointer!"; ++ return false; ++ } ++ ++ *address = tc.gpr[13]; ++ ++ return true; ++} ++ ++// Stubs for 32-bit functions not applicable on PPC64 ++bool GetFloatingPointRegisters32(pid_t tid, ++ FloatContext* context, ++ bool can_log) { return false; } ++bool GetThreadArea32(pid_t tid, ++ const ThreadContext &context, ++ LinuxVMAddress *address, ++ bool can_log) { return false; } ++ + #else + #error Port. + #endif // ARCH_CPU_X86_FAMILY +@@ -528,6 +586,9 @@ bool Ptracer::GetThreadInfo(pid_t tid, T + if (is_64_bit_) { + return GetGeneralPurposeRegisters64(tid, &info->thread_context, can_log_) && + GetFloatingPointRegisters64(tid, &info->float_context, can_log_) && ++#if defined(ARCH_CPU_PPC64_FAMILY) ++ GetVectorRegisters64(tid, &info->vector_context, can_log_) && ++#endif + GetThreadArea64(tid, + info->thread_context, + &info->thread_specific_data_address, +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/util/linux/thread_info.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/util/linux/thread_info.h ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/util/linux/thread_info.h +@@ -34,6 +34,10 @@ + #include + #endif + ++#if defined(ARCH_CPU_PPC64_FAMILY) ++#include ++#endif ++ + namespace crashpad { + + //! \brief The set of general purpose registers for an architecture family. +@@ -87,6 +91,8 @@ union ThreadContext { + uint32_t padding1_; + #elif defined(ARCH_CPU_RISCV64) + // 32 bit RISC-V not supported ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ // PPC64 is 64-bit + #else + #error Port. + #endif // ARCH_CPU_X86_FAMILY +@@ -144,6 +150,21 @@ union ThreadContext { + // Reflects user_regs_struct in asm/ptrace.h. + uint64_t pc; + uint64_t regs[31]; ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ // Reflects struct pt_regs in asm/ptrace.h. 
++ uint64_t gpr[32]; ++ uint64_t nip; ++ uint64_t msr; ++ uint64_t orig_gpr3; ++ uint64_t ctr; ++ uint64_t lnk; ++ uint64_t xer; ++ uint64_t ccr; ++ uint64_t softe; ++ uint64_t trap; ++ uint64_t dar; ++ uint64_t dsisr; ++ uint64_t result; + #else + #error Port. + #endif // ARCH_CPU_X86_FAMILY +@@ -156,6 +177,8 @@ union ThreadContext { + using NativeThreadContext = user_regs; + #elif defined(ARCH_CPU_MIPS_FAMILY) + // No appropriate NativeThreadsContext type available for MIPS ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ using NativeThreadContext = struct pt_regs; + #else + #error Port. + #endif // ARCH_CPU_X86_FAMILY || ARCH_CPU_ARM64 || ARCH_CPU_RISCV64 +@@ -233,6 +256,9 @@ union FloatContext { + uint32_t fpu_id; + #elif defined(ARCH_CPU_RISCV64) + // 32 bit RISC-V not supported ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ // Crashpad's PPC support is 64-bit only, so this ++ // 32bit-only struct is declared as empty. + #else + #error Port. + #endif // ARCH_CPU_X86_FAMILY +@@ -271,6 +297,10 @@ union FloatContext { + // Reflects __riscv_d_ext_state in asm/ptrace.h + uint64_t fpregs[32]; + uint64_t fcsr; ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ // Reflects fpregset_t in sys/ucontext.h ++ double fpregs[32]; ++ double fpscr; + #else + #error Port. + #endif // ARCH_CPU_X86_FAMILY +@@ -302,6 +332,8 @@ union FloatContext { + // No appropriate floating point context native type for available MIPS. + #elif defined(ARCH_CPU_RISCV64) + static_assert(sizeof(f64) == sizeof(__riscv_d_ext_state), "Size mismatch"); ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ static_assert(sizeof(f64) == sizeof(fpregset_t), "Size mismatch"); + #else + #error Port. + #endif // ARCH_CPU_X86 +@@ -309,6 +341,26 @@ union FloatContext { + static_assert(std::is_standard_layout::value, + "Not standard layout"); + ++//! 
\brief The vector registers used for an architecture family ++union VectorContext { ++ struct v32_t {} v32; ++#if defined(ARCH_CPU_PPC64_FAMILY) ++ __attribute__((__aligned__(16))) // Vector context must be doubleword aligned. ++#endif ++ struct v64_t { ++#if defined(ARCH_CPU_PPC64_FAMILY) ++ // Reflects vrregset_t in sys/ucontext.h ++ uint32_t vrregs[32][4]; ++ struct { ++ uint32_t __pad[3]; ++ uint32_t vscr_word; ++ } vscr; ++ uint32_t vrsave; ++ uint32_t __pad[3]; ++#endif ++ } v64; ++}; ++ + //! \brief A collection of `ptrace`-able information about a thread. + struct ThreadInfo { + ThreadInfo(); +@@ -320,6 +372,9 @@ struct ThreadInfo { + //! \brief The floating point registers for the thread. + FloatContext float_context; + ++ //! \brief (Optional) The vector registers used for the thread. ++ VectorContext vector_context; ++ + //! \brief The thread-local storage address for the thread. + LinuxVMAddress thread_specific_data_address; + }; +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/util/misc/capture_context.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/util/misc/capture_context.h ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/util/misc/capture_context.h +@@ -70,6 +70,7 @@ using NativeCPUContext = ucontext_t; + //! Linux | ARM/ARM64 | `r0`/`x0` + //! Linux | MIPS/MIPS64 | `$a0` + //! Linux | RISCV64 | `a0` ++//! Linux | PPC64 | `r3` + //! + //! Additionally, the value `LR` on ARM/ARM64 will be the return address of + //! this function. 
+Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/util/misc/capture_context_linux.S +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/util/misc/capture_context_linux.S ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/util/misc/capture_context_linux.S +@@ -30,7 +30,7 @@ + .globl CAPTURECONTEXT_SYMBOL2 + #if defined(__i386__) || defined(__x86_64__) + .balign 16, 0x90 +-#elif defined(__arm__) || defined(__aarch64__) ++#elif defined(__arm__) || defined(__aarch64__) || defined(__powerpc64__) + .balign 4, 0x0 + .type CAPTURECONTEXT_SYMBOL, %function + .type CAPTURECONTEXT_SYMBOL2, %function +@@ -430,6 +430,216 @@ CAPTURECONTEXT_SYMBOL2: + + .set at + ++#elif defined(__powerpc64__) ++ // Store r0-r31 ++ std 0, 0xe8(3) // context->uc_mcontext.gp_regs[0] ++ std 1, 0xf0(3) // context->uc_mcontext.gp_regs[1] ++ std 2, 0xf8(3) // context->uc_mcontext.gp_regs[2] ++ // note that r3's original value was lost ++ std 3, 0x100(3) // context->uc_mcontext.gp_regs[3] ++ std 4, 0x108(3) // context->uc_mcontext.gp_regs[4] ++ std 5, 0x110(3) // context->uc_mcontext.gp_regs[5] ++ std 6, 0x118(3) // context->uc_mcontext.gp_regs[6] ++ std 7, 0x120(3) // context->uc_mcontext.gp_regs[7] ++ std 8, 0x128(3) // context->uc_mcontext.gp_regs[8] ++ std 9, 0x130(3) // context->uc_mcontext.gp_regs[9] ++ std 10, 0x138(3) // context->uc_mcontext.gp_regs[10] ++ std 11, 0x140(3) // context->uc_mcontext.gp_regs[11] ++ std 12, 0x148(3) // context->uc_mcontext.gp_regs[12] ++ std 13, 0x150(3) // context->uc_mcontext.gp_regs[13] ++ std 14, 0x158(3) // context->uc_mcontext.gp_regs[14] ++ std 15, 0x160(3) // context->uc_mcontext.gp_regs[15] ++ std 16, 0x168(3) // context->uc_mcontext.gp_regs[16] ++ std 17, 0x170(3) // context->uc_mcontext.gp_regs[17] ++ std 18, 0x178(3) // context->uc_mcontext.gp_regs[18] ++ std 19, 0x180(3) // context->uc_mcontext.gp_regs[19] ++ std 20, 0x188(3) // 
context->uc_mcontext.gp_regs[20] ++ std 21, 0x190(3) // context->uc_mcontext.gp_regs[21] ++ std 22, 0x198(3) // context->uc_mcontext.gp_regs[22] ++ std 23, 0x1a0(3) // context->uc_mcontext.gp_regs[23] ++ std 24, 0x1a8(3) // context->uc_mcontext.gp_regs[24] ++ std 25, 0x1b0(3) // context->uc_mcontext.gp_regs[25] ++ std 26, 0x1b8(3) // context->uc_mcontext.gp_regs[26] ++ std 27, 0x1c0(3) // context->uc_mcontext.gp_regs[27] ++ std 28, 0x1c8(3) // context->uc_mcontext.gp_regs[28] ++ std 29, 0x1d0(3) // context->uc_mcontext.gp_regs[29] ++ std 30, 0x1d8(3) // context->uc_mcontext.gp_regs[30] ++ std 31, 0x1e0(3) // context->uc_mcontext.gp_regs[31] ++ ++ // For NIP, we can use the value in the link register ++ mflr 0 ++ std 0, 0x1e8(3) // context->uc_mcontext.gp_regs[PT_NIP] ++ ++ // CTR ++ mfctr 0 ++ std 0, 0x200(3) // context->uc_mcontext.gp_regs[PT_CTR] ++ ++ // For LNK, we'll use the caller's LR save area (2 stack frames up). ++ // r4 can be used as a scratch register since it has already been saved. ++ ld 4, 0(1) ++ ld 4, 16(4) ++ std 4, 0x208(3) // context->uc_mcontext.gp_regs[PT_LNK] ++ ++ // XER ++ mfxer 0 ++ std 0, 0x210(3) // context->uc_mcontext.gp_regs[PT_XER] ++ ++ // CCR ++ mfcr 0 ++ std 0, 0x218(3) // context->uc_mcontext.gp_regs[PT_CCR] ++ ++ // MSR, orig_r3, MQ, TRAP, DAR, DSISR, RESULT, DSCR, ++ // not used or not relevant, zero them out. 
++ li 4, 0 ++ std 4, 0x1f0(3) // context->uc_mcontext.gp_regs[PT_MSR] ++ std 4, 0x1f8(3) // context->uc_mcontext.gp_regs[PT_ORIG_R3] ++ std 4, 0x220(3) // context->uc_mcontext.gp_regs[PT_MQ] ++ std 4, 0x228(3) // context->uc_mcontext.gp_regs[PT_TRAP] ++ std 4, 0x230(3) // context->uc_mcontext.gp_regs[PT_DAR] ++ std 4, 0x238(3) // context->uc_mcontext.gp_regs[PT_DSISR] ++ std 4, 0x240(3) // context->uc_mcontext.gp_regs[PT_RESULT] ++ std 4, 0x248(3) // context->uc_mcontext.gp_regs[PT_DSCR] ++ ++ // Update context->uc_mcontext.regs to point to gp_regs ++ addi 0, 3, 0xe8 ++ std 0, 0xe0(3) ++ ++ // Save floating point registers 0-31 ++ stfd 0, 0x268(3) // context->uc_mcontext.fp_regs[0] ++ stfd 1, 0x270(3) // context->uc_mcontext.fp_regs[1] ++ stfd 2, 0x278(3) // context->uc_mcontext.fp_regs[2] ++ stfd 3, 0x280(3) // context->uc_mcontext.fp_regs[3] ++ stfd 4, 0x288(3) // context->uc_mcontext.fp_regs[4] ++ stfd 5, 0x290(3) // context->uc_mcontext.fp_regs[5] ++ stfd 6, 0x298(3) // context->uc_mcontext.fp_regs[6] ++ stfd 7, 0x2a0(3) // context->uc_mcontext.fp_regs[7] ++ stfd 8, 0x2a8(3) // context->uc_mcontext.fp_regs[8] ++ stfd 9, 0x2b0(3) // context->uc_mcontext.fp_regs[9] ++ stfd 10, 0x2b8(3) // context->uc_mcontext.fp_regs[10] ++ stfd 11, 0x2c0(3) // context->uc_mcontext.fp_regs[11] ++ stfd 12, 0x2c8(3) // context->uc_mcontext.fp_regs[12] ++ stfd 13, 0x2d0(3) // context->uc_mcontext.fp_regs[13] ++ stfd 14, 0x2d8(3) // context->uc_mcontext.fp_regs[14] ++ stfd 15, 0x2e0(3) // context->uc_mcontext.fp_regs[15] ++ stfd 16, 0x2e8(3) // context->uc_mcontext.fp_regs[16] ++ stfd 17, 0x2f0(3) // context->uc_mcontext.fp_regs[17] ++ stfd 18, 0x2f8(3) // context->uc_mcontext.fp_regs[18] ++ stfd 19, 0x300(3) // context->uc_mcontext.fp_regs[19] ++ stfd 20, 0x308(3) // context->uc_mcontext.fp_regs[20] ++ stfd 21, 0x310(3) // context->uc_mcontext.fp_regs[21] ++ stfd 22, 0x318(3) // context->uc_mcontext.fp_regs[22] ++ stfd 23, 0x320(3) // context->uc_mcontext.fp_regs[23] ++ stfd 24, 
0x328(3) // context->uc_mcontext.fp_regs[24] ++ stfd 25, 0x330(3) // context->uc_mcontext.fp_regs[25] ++ stfd 26, 0x338(3) // context->uc_mcontext.fp_regs[26] ++ stfd 27, 0x340(3) // context->uc_mcontext.fp_regs[27] ++ stfd 28, 0x348(3) // context->uc_mcontext.fp_regs[28] ++ stfd 29, 0x350(3) // context->uc_mcontext.fp_regs[29] ++ stfd 30, 0x358(3) // context->uc_mcontext.fp_regs[30] ++ stfd 31, 0x360(3) // context->uc_mcontext.fp_regs[31] ++ ++ // FPSCR ++ mffs 0 ++ stfd 0, 0x368(3) // context->uc_mcontext.fp_regs[32] ++ ++ // Save VMX Vector registers ++ // Update r4 to contain the base address of vmx_reserve ++ addi 4, 3, 0x378 ++ // Ensure that it is quadword aligned ++ andi. 5, 4, 0xF ++ beq 1f // No alignment is necessary ++ // Address is doubleword aligned and not quadword aligned, add 8 ++ addi 4, 4, 8 ++ ++1: ++ // Store VMX registers 0-31 ++ // r4 will contain the base address ++ // r5 will contain the index ++ li 5, 0 ++ stvx 0, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 0] ++ addi 5, 5, 16 ++ stvx 1, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 1] ++ addi 5, 5, 16 ++ stvx 2, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 2] ++ addi 5, 5, 16 ++ stvx 3, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 3] ++ addi 5, 5, 16 ++ stvx 4, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 4] ++ addi 5, 5, 16 ++ stvx 5, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 5] ++ addi 5, 5, 16 ++ stvx 6, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 6] ++ addi 5, 5, 16 ++ stvx 7, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 7] ++ addi 5, 5, 16 ++ stvx 8, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 8] ++ addi 5, 5, 16 ++ stvx 9, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 9] ++ addi 5, 5, 16 ++ stvx 10, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 10] ++ addi 5, 5, 16 ++ stvx 11, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 11] ++ addi 5, 5, 16 ++ stvx 12, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 12] 
++ addi 5, 5, 16 ++ stvx 13, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 13] ++ addi 5, 5, 16 ++ stvx 14, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 14] ++ addi 5, 5, 16 ++ stvx 15, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 15] ++ addi 5, 5, 16 ++ stvx 16, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 16] ++ addi 5, 5, 16 ++ stvx 17, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 17] ++ addi 5, 5, 16 ++ stvx 18, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 18] ++ addi 5, 5, 16 ++ stvx 19, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 19] ++ addi 5, 5, 16 ++ stvx 20, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 20] ++ addi 5, 5, 16 ++ stvx 21, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 21] ++ addi 5, 5, 16 ++ stvx 22, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 22] ++ addi 5, 5, 16 ++ stvx 23, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 23] ++ addi 5, 5, 16 ++ stvx 24, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 24] ++ addi 5, 5, 16 ++ stvx 25, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 25] ++ addi 5, 5, 16 ++ stvx 26, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 26] ++ addi 5, 5, 16 ++ stvx 27, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 27] ++ addi 5, 5, 16 ++ stvx 28, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 28] ++ addi 5, 5, 16 ++ stvx 29, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 29] ++ addi 5, 5, 16 ++ stvx 30, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 30] ++ addi 5, 5, 16 ++ stvx 31, 4, 5 // context->uc_mcontext.vmx_reserve[(align) + 31] ++ addi 5, 5, 16 ++ ++ // VSCR ++ mfvscr 0 ++ stvx 0, 4, 5 ++ addi 5, 5, 16 ++ ++ // VRSAVE ++ mfvrsave 0 ++ stwx 0, 4, 5 ++ ++ // Update context->uc_mcontext.v_regs to point to vmx_reserve + alignment. 
++ std 4, 0x370(3) ++ ++ // Zero out all unused fields ++ li 4, 0 ++ std 4, 0xc8(3) // context->uc_mcontext.signal ++ std 4, 0xd0(3) // context->uc_mcontext.handler ++ std 4, 0xd8(3) // context->uc_mcontext.oldmask ++ ++ blr + #elif defined(__riscv) + + #define MCONTEXT_GREGS_OFFSET 176 +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/util/misc/capture_context_test.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/util/misc/capture_context_test.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/util/misc/capture_context_test.cc +@@ -48,7 +48,7 @@ void TestCaptureContext() { + uintptr_t pc = ProgramCounterFromContext(context_1); + + #if !defined(ADDRESS_SANITIZER) && !defined(ARCH_CPU_MIPS_FAMILY) && \ +- !defined(MEMORY_SANITIZER) ++ !defined(MEMORY_SANITIZER) && !defined(ARCH_CPU_PPC64_FAMILY) + // Sanitizers can cause enough code bloat that the “nearby” check would + // likely fail. 
+ const uintptr_t kReferencePC = +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/util/misc/capture_context_test_util_linux.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/util/misc/capture_context_test_util_linux.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/util/misc/capture_context_test_util_linux.cc +@@ -38,6 +38,8 @@ void SanityCheckContext(const NativeCPUC + #elif defined(ARCH_CPU_RISCV64) + EXPECT_EQ(context.uc_mcontext.__gregs[10], + FromPointerCast(&context)); ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ EXPECT_EQ(context.uc_mcontext.gp_regs[3], FromPointerCast(&context)); + #endif + } + +@@ -54,6 +56,8 @@ uintptr_t ProgramCounterFromContext(cons + return context.uc_mcontext.pc; + #elif defined(ARCH_CPU_RISCV64) + return context.uc_mcontext.__gregs[0]; ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ return context.uc_mcontext.gp_regs[PT_NIP]; + #endif + } + +@@ -70,6 +74,8 @@ uintptr_t StackPointerFromContext(const + return context.uc_mcontext.gregs[29]; + #elif defined(ARCH_CPU_RISCV64) + return context.uc_mcontext.__gregs[2]; ++#elif defined(ARCH_CPU_PPC64_FAMILY) ++ return context.uc_mcontext.gp_regs[1]; + #endif + } + diff --git a/0001-Implement-support-for-ppc64-on-Linux.patch b/0001-Implement-support-for-ppc64-on-Linux.patch new file mode 100644 index 0000000..52d5bb8 --- /dev/null +++ b/0001-Implement-support-for-ppc64-on-Linux.patch @@ -0,0 +1,774 @@ +From cda9400739dfa064907d822f00578bb51b24a404 Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Fri, 17 Aug 2018 14:18:33 -0500 +Subject: [PATCH] Implement support for ppc64 on Linux + +This patch implements support for the ppc64 architecture on Linux systems. 
+ +Notable changes include: +* Modification of tests to support non-4K page sizes +* minidump_writer: Determine size of stack to capture based on page size +* dump_writer_common: Introduce member function GetVectorRegisters to + ThreadInfo on ppc64 systems. This allows Altivec/VMX registers to be + dumped like they are on OS X. linux_ptrace_dumper has been updated + to utilize this function along with the ptrace mode NT_PPC_VMX. +* processor/exploitability_unittest.cc: Tests were disabled on + non-x86 systems. They assume the system objdump is capable of + disassembling x86 binaries which is not the case on other + architectures. + +To-do: +* tools/linux/md2core has been updated as well, but functionality + has not been confirmed and restoration of Altivec/VMX registers + has not been implemented + +Note that proper functionality depends on updates to third_party/LSS +that introduce PPC64 support. An in-progress patch that allows +breakpad to build and run successfully is available at: +https://wiki.raptorcs.com/wiki/Porting/Chromium +--- + .../dump_writer_common/raw_context_cpu.h | 2 + + .../linux/dump_writer_common/thread_info.cc | 56 ++++++++++++++++++- + .../linux/dump_writer_common/thread_info.h | 9 +++ + .../dump_writer_common/ucontext_reader.cc | 42 ++++++++++++++ + .../dump_writer_common/ucontext_reader.h | 3 + + src/client/linux/handler/exception_handler.cc | 22 +++++++- + src/client/linux/handler/exception_handler.h | 6 +- + .../handler/exception_handler_unittest.cc | 8 ++- + .../microdump_writer/microdump_writer.cc | 14 ++++- + .../microdump_writer_unittest.cc | 15 ++++- + .../minidump_writer/linux_core_dumper.cc | 8 ++- + .../linux/minidump_writer/linux_dumper.cc | 4 +- + .../linux/minidump_writer/linux_dumper.h | 3 +- + .../linux_dumper_unittest_helper.cc | 2 + + .../minidump_writer/linux_ptrace_dumper.cc | 19 +++++-- + .../linux_ptrace_dumper_unittest.cc | 5 ++ + .../linux/minidump_writer/minidump_writer.cc | 18 ++++-- + 
.../linux/minidump_writer/minidump_writer.h | 2 + + .../minidump_writer_unittest.cc | 3 + + src/common/linux/memory_mapped_file.cc | 3 +- + .../linux/memory_mapped_file_unittest.cc | 7 ++- + src/common/memory_allocator_unittest.cc | 3 +- + src/processor/exploitability_linux.cc | 2 + + src/processor/exploitability_unittest.cc | 15 +++-- + src/tools/linux/md2core/minidump-2-core.cc | 45 +++++++++++++++ + 25 files changed, 281 insertions(+), 35 deletions(-) + +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/raw_context_cpu.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/raw_context_cpu.h ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/raw_context_cpu.h +@@ -51,6 +51,8 @@ typedef MDRawContextRISCV64 RawContextCP + # else + # error "Unexpected __riscv_xlen" + # endif ++#elif defined(__powerpc64__) ++typedef MDRawContextPPC64 RawContextCPU; + #else + #error "This code has not been ported to your platform yet." 
+ #endif +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.cc +@@ -336,7 +336,42 @@ void ThreadInfo::FillCPUContext(RawConte + #error "Unexpected __riscv_xlen" + #endif + } +-#endif // __riscv ++ ++#elif defined(__powerpc64__) ++ ++uintptr_t ThreadInfo::GetInstructionPointer() const { ++ return mcontext.gp_regs[PT_NIP]; ++} ++ ++void ThreadInfo::FillCPUContext(RawContextCPU* out) const { ++ out->context_flags = MD_CONTEXT_PPC64_FULL; ++ for (int i = 0; i < MD_CONTEXT_PPC64_GPR_COUNT; i++) ++ out->gpr[i] = mcontext.gp_regs[i]; ++ ++ out->lr = mcontext.gp_regs[PT_LNK]; ++ out->srr0 = mcontext.gp_regs[PT_NIP]; ++ out->srr1 = mcontext.gp_regs[PT_MSR]; ++ out->cr = mcontext.gp_regs[PT_CCR]; ++ out->xer = mcontext.gp_regs[PT_XER]; ++ out->ctr = mcontext.gp_regs[PT_CTR]; ++ ++ for (int i = 0; i < MD_FLOATINGSAVEAREA_PPC_FPR_COUNT; i++) ++ out->float_save.fpregs[i] = mcontext.fp_regs[i]; ++ ++ out->float_save.fpscr = mcontext.fp_regs[NFPREG-1]; ++ ++ for (int i = 0; i < MD_VECTORSAVEAREA_PPC_VR_COUNT; i++) ++ out->vector_save.save_vr[i] = \ ++ {(((uint64_t)vregs.vrregs[i][0]) << 32) ++ | vregs.vrregs[i][1], ++ (((uint64_t)vregs.vrregs[i][2]) << 32) ++ | vregs.vrregs[i][3]}; ++ ++ out->vrsave = vregs.vrsave; ++ out->vector_save.save_vscr = {0, vregs.vscr.vscr_word}; ++ out->vector_save.save_vrvalid = 0xFFFFFFFF; ++} ++#endif // __powerpc64__ + + void ThreadInfo::GetGeneralPurposeRegisters(void** gp_regs, size_t* size) { + assert(gp_regs || size); +@@ -350,6 +385,11 @@ void ThreadInfo::GetGeneralPurposeRegist + *gp_regs = mcontext.__gregs; + if (size) + *size = sizeof(mcontext.__gregs); ++#elif defined(__powerpc64__) ++ if 
(gp_regs) ++ *gp_regs = mcontext.gp_regs; ++ if (size) ++ *size = sizeof(mcontext.gp_regs); + #else + if (gp_regs) + *gp_regs = ®s; +@@ -384,6 +424,11 @@ void ThreadInfo::GetFloatingPointRegiste + # else + # error "Unexpected __riscv_flen" + # endif ++#elif defined(__powerpc64__) ++ if (fp_regs) ++ *fp_regs = &mcontext.fp_regs; ++ if (size) ++ *size = sizeof(mcontext.fp_regs); + #else + if (fp_regs) + *fp_regs = &fpregs; +@@ -392,4 +437,13 @@ void ThreadInfo::GetFloatingPointRegiste + #endif + } + ++#if defined(__powerpc64__) ++void ThreadInfo::GetVectorRegisters(void** v_regs, size_t* size) { ++ if (v_regs) ++ *v_regs = &vregs; ++ if (size) ++ *size = sizeof(vregs); ++} ++#endif ++ + } // namespace google_breakpad +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.h ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/thread_info.h +@@ -67,6 +67,10 @@ struct ThreadInfo { + // Use the structures defined in + struct user_regs_struct regs; + struct user_fpsimd_struct fpregs; ++#elif defined(__powerpc64__) ++ // Use the structures defined in . ++ mcontext_t mcontext; ++ struct _libc_vrstate vregs; + #elif defined(__mips__) || defined(__riscv) + // Use the structure defined in . + mcontext_t mcontext; +@@ -83,6 +87,11 @@ struct ThreadInfo { + + // Returns the pointer and size of float point register area. + void GetFloatingPointRegisters(void** fp_regs, size_t* size); ++ ++#if defined(__powerpc64__) ++ // Returns the pointer and size of the vector register area. 
(PPC64 only) ++ void GetVectorRegisters(void** v_regs, size_t* size); ++#endif + }; + + } // namespace google_breakpad +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.cc +@@ -324,6 +324,48 @@ void UContextReader::FillCPUContext(RawC + #error "Unexpected __riscv_xlen" + #endif + } ++ ++#elif defined(__powerpc64__) ++ ++uintptr_t UContextReader::GetStackPointer(const ucontext_t* uc) { ++ return uc->uc_mcontext.gp_regs[MD_CONTEXT_PPC64_REG_SP]; ++} ++ ++uintptr_t UContextReader::GetInstructionPointer(const ucontext_t* uc) { ++ return uc->uc_mcontext.gp_regs[PT_NIP]; ++} ++ ++void UContextReader::FillCPUContext(RawContextCPU* out, const ucontext_t* uc, ++ const struct _libc_vrstate* vregs) { ++ out->context_flags = MD_CONTEXT_PPC64_FULL; ++ ++ for (int i = 0; i < MD_CONTEXT_PPC64_GPR_COUNT; i++) ++ out->gpr[i] = uc->uc_mcontext.gp_regs[i]; ++ ++ out->lr = uc->uc_mcontext.gp_regs[PT_LNK]; ++ out->srr0 = uc->uc_mcontext.gp_regs[PT_NIP]; ++ out->srr1 = uc->uc_mcontext.gp_regs[PT_MSR]; ++ out->cr = uc->uc_mcontext.gp_regs[PT_CCR]; ++ out->xer = uc->uc_mcontext.gp_regs[PT_XER]; ++ out->ctr = uc->uc_mcontext.gp_regs[PT_CTR]; ++ ++ for (int i = 0; i < MD_FLOATINGSAVEAREA_PPC_FPR_COUNT; i++) ++ out->float_save.fpregs[i] = uc->uc_mcontext.fp_regs[i]; ++ ++ out->float_save.fpscr = uc->uc_mcontext.fp_regs[NFPREG-1]; ++ ++ for (int i = 0; i < MD_VECTORSAVEAREA_PPC_VR_COUNT; i++) ++ out->vector_save.save_vr[i] = ++ {(((uint64_t)vregs->vrregs[i][0]) << 32) ++ | vregs->vrregs[i][1], ++ (((uint64_t)vregs->vrregs[i][2]) << 32) ++ | vregs->vrregs[i][3]}; ++ ++ out->vrsave = vregs->vrsave; ++ out->vector_save.save_vscr = {0, 
vregs->vscr.vscr_word}; ++ out->vector_save.save_vrvalid = 0xFFFFFFFF; ++} ++ + #endif + + } // namespace google_breakpad +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.h ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/dump_writer_common/ucontext_reader.h +@@ -54,6 +54,9 @@ struct UContextReader { + #elif defined(__aarch64__) + static void FillCPUContext(RawContextCPU* out, const ucontext_t* uc, + const struct fpsimd_context* fpregs); ++#elif defined(__powerpc64__) ++ static void FillCPUContext(RawContextCPU *out, const ucontext_t *uc, ++ const struct _libc_vrstate* vregs); + #else + static void FillCPUContext(RawContextCPU* out, const ucontext_t* uc); + #endif +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.cc +@@ -464,6 +464,13 @@ bool ExceptionHandler::HandleSignal(int + memcpy(&g_crash_context_.float_state, fp_ptr, + sizeof(g_crash_context_.float_state)); + } ++#elif defined(__powerpc64__) ++ // On PPC64, we must copy VR state ++ ucontext_t* uc_ptr = (ucontext_t*)uc; ++ if (uc_ptr->uc_mcontext.v_regs) { ++ memcpy(&g_crash_context_.vector_state, uc_ptr->uc_mcontext.v_regs, ++ sizeof(g_crash_context_.vector_state)); ++ } + #elif GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE + ucontext_t* uc_ptr = (ucontext_t*)uc; + if (uc_ptr->uc_mcontext.fpregs) { +@@ -701,10 +708,18 @@ bool ExceptionHandler::WriteMinidump() { + } + #endif + +-#if 
GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE && !defined(__aarch64__) ++#if GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE && !defined(__aarch64__) \ ++ && !defined(__powerpc64__) + memcpy(&context.float_state, context.context.uc_mcontext.fpregs, + sizeof(context.float_state)); + #endif ++ ++#if defined(__powerpc64__) ++ // Vector registers must be copied on PPC64 ++ memcpy(&context.vector_state, context.context.uc_mcontext.v_regs, ++ sizeof(context.vector_state)); ++#endif ++ + context.tid = sys_gettid(); + + // Add an exception stream to the minidump for better reporting. +@@ -725,6 +740,9 @@ bool ExceptionHandler::WriteMinidump() { + #elif defined(__mips__) + context.siginfo.si_addr = + reinterpret_cast(context.context.uc_mcontext.pc); ++#elif defined(__powerpc64__) ++ context.siginfo.si_addr = ++ reinterpret_cast(context.context.uc_mcontext.gp_regs[PT_NIP]); + #elif defined(__riscv) + context.siginfo.si_addr = + reinterpret_cast(context.context.uc_mcontext.__gregs[REG_PC]); +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.h ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler.h +@@ -200,7 +200,11 @@ class ExceptionHandler { + siginfo_t siginfo; + pid_t tid; // the crashing thread. + ucontext_t context; +-#if GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE ++#if defined(__powerpc64__) ++ // PPC64's FP state is a part of ucontext_t like MIPS but the vector ++ // state is not, so a struct is needed. 
++ vstate_t vector_state; ++#elif GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE + fpstate_t float_state; + #endif + }; +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler_unittest.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler_unittest.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/handler/exception_handler_unittest.cc +@@ -321,7 +321,7 @@ TEST(ExceptionHandlerTest, ParallelChild + ASSERT_EQ(SIGSEGV, WTERMSIG(status)); + return; + } else { +- usleep(100000); ++ usleep(200000); + } + } + +@@ -576,6 +576,8 @@ const unsigned char kIllegalInstruction[ + #if defined(__mips__) + // mfc2 zero,Impl - usually illegal in userspace. + 0x48, 0x00, 0x00, 0x48 ++#elif defined(__powerpc64__) ++ 0x01, 0x01, 0x01, 0x01 // Crashes on a tested POWER9 cpu + #else + // This crashes with SIGILL on x86/x86-64/arm. + 0xff, 0xff, 0xff, 0xff +@@ -771,10 +773,10 @@ TEST(ExceptionHandlerTest, InstructionPo + + // These are defined here so the parent can use them to check the + // data from the minidump afterwards. +- // Use 4k here because the OS will hand out a single page even ++ // Use the page size here because the OS will hand out a single page even + // if a smaller size is requested, and this test wants to + // test the upper bound of the memory range. 
+- const uint32_t kMemorySize = 4096; // bytes ++ const uint32_t kMemorySize = getpagesize(); // bytes + const int kOffset = kMemorySize - sizeof(kIllegalInstruction); + + const pid_t child = fork(); +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer.cc +@@ -141,7 +141,9 @@ class MicrodumpWriter { + const MicrodumpExtraInfo& microdump_extra_info, + LinuxDumper* dumper) + : ucontext_(context ? &context->context : NULL), +-#if GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE ++#if defined(__powerpc64__) ++ vector_state_(context ? &context->vector_state : NULL), ++#elif GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE + float_state_(context ? &context->float_state : NULL), + #endif + dumper_(dumper), +@@ -348,6 +350,8 @@ class MicrodumpWriter { + # else + # error "Unexpected __riscv_xlen" + # endif ++#elif defined(__powerpc64__) ++ const char kArch[] = "ppc64"; + #else + # error "This code has not been ported to your platform yet" + #endif +@@ -420,7 +424,9 @@ class MicrodumpWriter { + void DumpCPUState() { + RawContextCPU cpu; + my_memset(&cpu, 0, sizeof(RawContextCPU)); +-#if GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE ++#if defined(__powerpc64__) ++ UContextReader::FillCPUContext(&cpu, ucontext_, vector_state_); ++#elif GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE + UContextReader::FillCPUContext(&cpu, ucontext_, float_state_); + #else + UContextReader::FillCPUContext(&cpu, ucontext_); +@@ -616,7 +622,9 @@ class MicrodumpWriter { + void* Alloc(unsigned bytes) { return dumper_->allocator()->Alloc(bytes); } + + const ucontext_t* const ucontext_; +-#if GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE ++#if 
defined(__powerpc64__) ++ const google_breakpad::vstate_t* const vector_state_; ++#elif GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE + const google_breakpad::fpstate_t* const float_state_; + #endif + LinuxDumper* dumper_; +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer_unittest.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer_unittest.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/microdump_writer/microdump_writer_unittest.cc +@@ -282,10 +282,19 @@ TEST(MicrodumpWriterTest, BasicWithMappi + CrashAndGetMicrodump(mappings, MicrodumpExtraInfo(), &buf); + ASSERT_TRUE(ContainsMicrodump(buf)); + ++ int page_size = getpagesize(); + #ifdef __LP64__ +- ASSERT_NE(std::string::npos, +- buf.find("M 0000000000001000 000000000000002A 0000000000001000 " +- "33221100554477668899AABBCCDDEEFF0 libfoo.so")); ++ // This test is only available for the following page sizes ++ ASSERT_TRUE((page_size == 4096) || (page_size == 65536)); ++ if (page_size == 4096) { ++ ASSERT_NE(std::string::npos, ++ buf.find("M 0000000000001000 000000000000002A 0000000000001000 " ++ "33221100554477668899AABBCCDDEEFF0 libfoo.so")); ++ } else { ++ ASSERT_NE(std::string::npos, ++ buf.find("M 0000000000010000 000000000000002A 0000000000010000 " ++ "33221100554477668899AABBCCDDEEFF0 libfoo.so")); ++ } + #else + ASSERT_NE(std::string::npos, + buf.find("M 00001000 0000002A 00001000 " +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_core_dumper.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_core_dumper.cc ++++ 
chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_core_dumper.cc +@@ -118,6 +118,9 @@ bool LinuxCoreDumper::GetThreadInfoByInd + #elif defined(__riscv) + stack_pointer = reinterpret_cast( + info->mcontext.__gregs[MD_CONTEXT_RISCV_REG_SP]); ++#elif defined(__powerpc64__) ++ stack_pointer = ++ reinterpret_cast(info->mcontext.gp_regs[MD_CONTEXT_PPC64_REG_SP]); + #else + # error "This code hasn't been ported to your platform yet." + #endif +@@ -213,7 +216,10 @@ bool LinuxCoreDumper::EnumerateThreads() + memset(&info, 0, sizeof(ThreadInfo)); + info.tgid = status->pr_pgrp; + info.ppid = status->pr_ppid; +-#if defined(__mips__) ++#if defined(__powerpc64__) ++ for (int i = 0; i < 31; i++) ++ info.mcontext.gp_regs[i] = status->pr_reg[i]; ++#elif defined(__mips__) + # if defined(__ANDROID__) + for (int i = EF_R0; i <= EF_R31; i++) + info.mcontext.gregs[i - EF_R0] = status->pr_reg[i]; +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.cc +@@ -770,7 +770,9 @@ bool LinuxDumper::GetStackInfo(const voi + reinterpret_cast(int_stack_pointer & ~(page_size - 1)); + + // The number of bytes of stack which we try to capture. +- static const ptrdiff_t kStackToCapture = 32 * 1024; ++ // This now depends on page_size to avoid missing data ++ // on systems with larger page sizes. 
++ static const ptrdiff_t kStackToCapture = 8 * page_size; + + const MappingInfo* mapping = FindMapping(stack_pointer); + if (!mapping) +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.h ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.h +@@ -64,7 +64,8 @@ namespace google_breakpad { + typedef Elf32_auxv_t elf_aux_entry; + #elif defined(__x86_64) || defined(__aarch64__) || \ + (defined(__mips__) && _MIPS_SIM != _ABIO32) || \ +- (defined(__riscv) && __riscv_xlen == 64) ++ (defined(__riscv) && __riscv_xlen == 64) || \ ++ defined(__powerpc64__) + typedef Elf64_auxv_t elf_aux_entry; + #endif + +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc +@@ -56,6 +56,8 @@ + #define TID_PTR_REGISTER "$1" + #elif defined(__riscv) + #define TID_PTR_REGISTER "x4" ++#elif defined(__powerpc64__) ++#define TID_PTR_REGISTER "r8" + #else + #error This test has not been ported to this platform. 
+ #endif +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper.cc +@@ -152,19 +152,27 @@ bool LinuxPtraceDumper::CopyFromProcess( + return true; + } + +-bool LinuxPtraceDumper::ReadRegisterSet(ThreadInfo* info, pid_t tid) +-{ ++bool LinuxPtraceDumper::ReadRegisterSet(ThreadInfo* info, pid_t tid) { + #ifdef PTRACE_GETREGSET + struct iovec io; + info->GetGeneralPurposeRegisters(&io.iov_base, &io.iov_len); +- if (sys_ptrace(PTRACE_GETREGSET, tid, (void*)NT_PRSTATUS, (void*)&io) == -1) { ++ if (ptrace(PTRACE_GETREGSET, tid, (void*)NT_PRSTATUS, (void*)&io) == -1) { + return false; + } + + info->GetFloatingPointRegisters(&io.iov_base, &io.iov_len); +- if (sys_ptrace(PTRACE_GETREGSET, tid, (void*)NT_FPREGSET, (void*)&io) == -1) { ++ if (ptrace(PTRACE_GETREGSET, tid, (void*)NT_FPREGSET, (void*)&io) == -1) { + return false; + } ++ ++#if defined(__powerpc64__) ++ // Grab the vector registers on PPC64 too ++ info->GetVectorRegisters(&io.iov_base, &io.iov_len); ++ if (ptrace(PTRACE_GETREGSET, tid, (void*)NT_PPC_VMX, (void*)&io) == -1) { ++ return false; ++ } ++#endif // defined(__powerpc64__) ++ + return true; + #else + return false; +@@ -312,6 +320,9 @@ bool LinuxPtraceDumper::GetThreadInfoByI + #elif defined(__riscv) + stack_pointer = reinterpret_cast( + info->mcontext.__gregs[MD_CONTEXT_RISCV_REG_SP]); ++#elif defined(__powerpc64__) ++ stack_pointer = ++ reinterpret_cast(info->mcontext.gp_regs[MD_CONTEXT_PPC64_REG_SP]); + #else + # error "This code hasn't been ported to your platform yet." 
+ #endif +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper_unittest.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper_unittest.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_ptrace_dumper_unittest.cc +@@ -470,6 +470,9 @@ TEST(LinuxPtraceDumperTest, VerifyStackR + #elif defined(__riscv) + pid_t* process_tid_location = + reinterpret_cast(one_thread.mcontext.__gregs[4]); ++#elif defined(__powerpc64__) ++ pid_t* process_tid_location = ++ reinterpret_cast(one_thread.mcontext.gp_regs[8]); + #else + #error This test has not been ported to this platform. + #endif +@@ -569,6 +572,8 @@ TEST_F(LinuxPtraceDumperTest, SanitizeSt + uintptr_t heap_addr = thread_info.mcontext.gregs[1]; + #elif defined(__riscv) + uintptr_t heap_addr = thread_info.mcontext.__gregs[4]; ++#elif defined(__powerpc64__) ++ uintptr_t heap_addr = thread_info.mcontext.gp_regs[8]; + #else + #error This test has not been ported to this platform. + #endif +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.cc +@@ -144,7 +144,9 @@ class MinidumpWriter { + : fd_(minidump_fd), + path_(minidump_path), + ucontext_(context ? &context->context : NULL), +-#if GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE ++#if defined(__powerpc64__) ++ vector_state_(context ? &context->vector_state : NULL), ++#elif GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE + float_state_(context ? 
&context->float_state : NULL), + #endif + dumper_(dumper), +@@ -476,7 +478,9 @@ class MinidumpWriter { + if (!cpu.Allocate()) + return false; + my_memset(cpu.get(), 0, sizeof(RawContextCPU)); +-#if GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE ++#if defined(__powerpc64__) ++ UContextReader::FillCPUContext(cpu.get(), ucontext_, vector_state_); ++#elif GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE + UContextReader::FillCPUContext(cpu.get(), ucontext_, float_state_); + #else + UContextReader::FillCPUContext(cpu.get(), ucontext_); +@@ -953,7 +957,7 @@ class MinidumpWriter { + dirent->location.rva = 0; + } + +-#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) ++#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || defined(__powerpc64__) + bool WriteCPUInformation(MDRawSystemInfo* sys_info) { + char vendor_id[sizeof(sys_info->cpu.x86_cpu_info.vendor_id) + 1] = {0}; + static const char vendor_id_name[] = "vendor_id"; +@@ -973,7 +977,9 @@ class MinidumpWriter { + + // processor_architecture should always be set, do this first + sys_info->processor_architecture = +-#if defined(__mips__) ++#if defined(__powerpc64__) ++ MD_CPU_ARCHITECTURE_PPC64; ++#elif defined(__mips__) + # if _MIPS_SIM == _ABIO32 + MD_CPU_ARCHITECTURE_MIPS; + # elif _MIPS_SIM == _ABI64 +@@ -1440,7 +1446,9 @@ class MinidumpWriter { + const char* path_; // Path to the file where the minidum should be written. 
+ + const ucontext_t* const ucontext_; // also from the signal handler +-#if GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE ++#if defined(__powerpc64__) ++ const google_breakpad::vstate_t* const vector_state_; ++#elif GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE + const google_breakpad::fpstate_t* const float_state_; // ditto + #endif + LinuxDumper* dumper_; +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.h ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer.h +@@ -47,6 +47,8 @@ class ExceptionHandler; + + #if defined(__aarch64__) + typedef struct fpsimd_context fpstate_t; ++#elif defined(__powerpc64__) ++typedef struct _libc_vrstate vstate_t; + #elif !defined(__ARM_EABI__) && !defined(__mips__) + typedef std::remove_pointer::type fpstate_t; + #endif +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer_unittest.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer_unittest.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/client/linux/minidump_writer/minidump_writer_unittest.cc +@@ -723,6 +723,9 @@ TEST(MinidumpWriterTest, InvalidStackPoi + #elif defined(__riscv) + context.context.uc_mcontext.__gregs[MD_CONTEXT_RISCV_REG_SP] = + invalid_stack_pointer; ++#elif defined(__powerpc64__) ++ context.context.uc_mcontext.gp_regs[MD_CONTEXT_PPC64_REG_SP] = ++ invalid_stack_pointer; + #else + # error "This code has not been ported to your platform yet." 
+ #endif +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file.cc +@@ -72,8 +72,7 @@ bool MemoryMappedFile::Map(const char* p + + #if defined(__x86_64__) || defined(__aarch64__) || \ + (defined(__mips__) && _MIPS_SIM == _ABI64) || \ +- (defined(__riscv) && __riscv_xlen == 64) +- ++ (defined(__riscv) && __riscv_xlen == 64) || defined(__powerpc64__) + struct kernel_stat st; + if (sys_fstat(fd, &st) == -1 || st.st_size < 0) { + #else +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file_unittest.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file_unittest.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/common/linux/memory_mapped_file_unittest.cc +@@ -179,9 +179,10 @@ TEST_F(MemoryMappedFileTest, RemapAfterM + TEST_F(MemoryMappedFileTest, MapWithOffset) { + // Put more data in the test file this time. Offsets can only be + // done on page boundaries, so we need a two page file to test this. 
+- const int page_size = 4096; +- char data1[2 * page_size]; +- size_t data1_size = sizeof(data1); ++ const int page_size = getpagesize(); ++ char *data1 = static_cast(malloc(2 * page_size)); ++ EXPECT_TRUE(data1 != NULL); ++ size_t data1_size = (2 * page_size); + for (size_t i = 0; i < data1_size; ++i) { + data1[i] = i & 0x7f; + } +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/common/memory_allocator_unittest.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/common/memory_allocator_unittest.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/common/memory_allocator_unittest.cc +@@ -60,8 +60,9 @@ TEST(PageAllocatorTest, LargeObject) { + + EXPECT_EQ(0U, allocator.pages_allocated()); + uint8_t* p = reinterpret_cast(allocator.Alloc(10000)); ++ uint64_t expected_pages = 1 + ((10000 - 1) / getpagesize()); + ASSERT_FALSE(p == NULL); +- EXPECT_EQ(3U, allocator.pages_allocated()); ++ EXPECT_EQ(expected_pages, allocator.pages_allocated()); + for (unsigned i = 1; i < 10; ++i) { + uint8_t* p = reinterpret_cast(allocator.Alloc(i)); + ASSERT_FALSE(p == NULL); +Index: chromium-120.0.6099.71/third_party/breakpad/breakpad/src/tools/linux/md2core/minidump-2-core.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/breakpad/src/tools/linux/md2core/minidump-2-core.cc ++++ chromium-120.0.6099.71/third_party/breakpad/breakpad/src/tools/linux/md2core/minidump-2-core.cc +@@ -82,6 +82,8 @@ + #define ELF_ARCH EM_AARCH64 + #elif defined(__riscv) + #define ELF_ARCH EM_RISCV ++#elif defined(__powerpc64__) ++ #define ELF_ARCH EM_PPC64 + #endif + + #if defined(__arm__) +@@ -92,6 +94,8 @@ typedef user_regs user_regs_struct; + #elif defined (__mips__) || defined(__riscv) + // This file-local typedef simplifies the source code. 
+ typedef gregset_t user_regs_struct; ++#elif defined(__powerpc64__) ++typedef struct pt_regs user_regs_struct; + #endif + + using google_breakpad::MDTypeHelper; +@@ -324,6 +328,9 @@ struct CrashedProcess { + #if defined(__aarch64__) + user_fpsimd_struct fpregs; + #endif ++#if defined(__powerpc64__) ++ mcontext_t mcontext; ++#endif + uintptr_t stack_addr; + const uint8_t* stack; + size_t stack_length; +@@ -599,6 +606,38 @@ ParseThreadRegisters(CrashedProcess::Thr + #error "Unexpected __riscv_xlen" + #endif + } ++#elif defined(__powerpc64__) ++static void ++ParseThreadRegisters(CrashedProcess::Thread* thread, ++ const MinidumpMemoryRange& range) { ++ const MDRawContextPPC64* rawregs = range.GetData(0); ++ ++ for (int i = 0; i < MD_CONTEXT_PPC64_GPR_COUNT; i++) ++ thread->mcontext.gp_regs[i] = rawregs->gpr[i]; ++ ++ thread->mcontext.gp_regs[PT_LNK] = rawregs->lr; ++ thread->mcontext.gp_regs[PT_NIP] = rawregs->srr0; ++ thread->mcontext.gp_regs[PT_MSR] = rawregs->srr1; ++ thread->mcontext.gp_regs[PT_CCR] = rawregs->cr; ++ thread->mcontext.gp_regs[PT_XER] = rawregs->xer; ++ thread->mcontext.gp_regs[PT_CTR] = rawregs->ctr; ++ thread->mcontext.v_regs->vrsave = rawregs->vrsave; ++ ++ for (int i = 0; i < MD_FLOATINGSAVEAREA_PPC_FPR_COUNT; i++) ++ thread->mcontext.fp_regs[i] = rawregs->float_save.fpregs[i]; ++ ++ thread->mcontext.fp_regs[NFPREG-1] = rawregs->float_save.fpscr; ++ ++ for (int i = 0; i < MD_VECTORSAVEAREA_PPC_VR_COUNT; i++) { ++ thread->mcontext.v_regs->vrregs[i][0] = rawregs->vector_save.save_vr[i].high >> 32; ++ thread->mcontext.v_regs->vrregs[i][1] = rawregs->vector_save.save_vr[i].high; ++ thread->mcontext.v_regs->vrregs[i][2] = rawregs->vector_save.save_vr[i].low >> 32; ++ thread->mcontext.v_regs->vrregs[i][3] = rawregs->vector_save.save_vr[i].low; ++ } ++ ++ thread->mcontext.v_regs->vscr.vscr_word = rawregs->vector_save.save_vscr.low & 0xFFFFFFFF; ++} ++ + #else + #error "This code has not been ported to your platform yet" + #endif +@@ -704,6 +743,12 @@ 
ParseSystemInfo(const Options& options, + # else + # error "Unexpected __riscv_xlen" + # endif ++#elif defined(__powerpc64__) ++ if (sysinfo->processor_architecture != MD_CPU_ARCHITECTURE_PPC64) { ++ fprintf(stderr, ++ "This version of minidump-2-core only supports PPC64.\n"); ++ exit(1); ++ } + #else + #error "This code has not been ported to your platform yet" + #endif diff --git a/0001-linux-seccomp-bpf-ppc64-glibc-workaround-in-SIGSYS-h.patch b/0001-linux-seccomp-bpf-ppc64-glibc-workaround-in-SIGSYS-h.patch new file mode 100644 index 0000000..636e674 --- /dev/null +++ b/0001-linux-seccomp-bpf-ppc64-glibc-workaround-in-SIGSYS-h.patch @@ -0,0 +1,37 @@ +From b3a14db7637232d30c878cc1f1ad6d8037e81379 Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Tue, 15 Jan 2019 22:42:21 -0600 +Subject: [PATCH] linux/seccomp-bpf: ppc64+glibc workaround in SIGSYS handler + +Workaround for an apparent issue with glibc negating syscall +parameters. Observed on a ppc64le machine with glibc. +More investigation required. +--- + sandbox/linux/seccomp-bpf/trap.cc | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +Index: chromium-120.0.6099.71/sandbox/linux/seccomp-bpf/trap.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/seccomp-bpf/trap.cc ++++ chromium-120.0.6099.71/sandbox/linux/seccomp-bpf/trap.cc +@@ -232,6 +232,20 @@ void Trap::SigSys(int nr, LinuxSigInfo* + SetIsInSigHandler(); + } + ++#if defined(__powerpc64__) ++ // On ppc64+glibc, some syscalls seem to accidentally negate the first ++ // parameter which causes checks against it to fail. For now, manually ++ // negate them back. 
++ // TODO(shawn@anastas.io): investigate this issue further ++ auto nr = SECCOMP_SYSCALL(ctx); ++ if (nr == __NR_openat || nr == __NR_mkdirat || nr == __NR_faccessat || nr == __NR_readlinkat || ++ nr == __NR_renameat || nr == __NR_renameat2 || nr == __NR_newfstatat || nr == __NR_unlinkat) { ++ if (static_cast(SECCOMP_PARM1(ctx)) > 0) { ++ SECCOMP_PARM1(ctx) = -SECCOMP_PARM1(ctx); ++ } ++ } ++#endif ++ + // Copy the seccomp-specific data into a arch_seccomp_data structure. This + // is what we are showing to TrapFnc callbacks that the system call + // evaluator registered with the sandbox. diff --git a/0001-sandbox-Enable-seccomp_bpf-for-ppc64.patch b/0001-sandbox-Enable-seccomp_bpf-for-ppc64.patch new file mode 100644 index 0000000..a001309 --- /dev/null +++ b/0001-sandbox-Enable-seccomp_bpf-for-ppc64.patch @@ -0,0 +1,23 @@ +From 0c65e40ae578b743b5f06956597ebc9700768d18 Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Thu, 9 Aug 2018 22:45:47 -0500 +Subject: [PATCH 1/1] sandbox: Enable seccomp_bpf for ppc64 + +--- + sandbox/features.gni | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: chromium-120.0.6099.71/sandbox/features.gni +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/features.gni ++++ chromium-120.0.6099.71/sandbox/features.gni +@@ -9,7 +9,8 @@ + use_seccomp_bpf = (is_linux || is_chromeos || is_android) && + (current_cpu == "x86" || current_cpu == "x64" || + current_cpu == "arm" || current_cpu == "arm64" || +- current_cpu == "mipsel" || current_cpu == "mips64el") ++ current_cpu == "mipsel" || current_cpu == "mips64el" || ++ current_cpu == "ppc64") + + # SSBD (Speculative Store Bypass Disable) is a mitigation of Spectre Variant 4. 
+ # As Spectre Variant 4 can be mitigated by site isolation, opt-out SSBD on site diff --git a/0001-sandbox-linux-Implement-partial-support-for-ppc64-sy.patch b/0001-sandbox-linux-Implement-partial-support-for-ppc64-sy.patch new file mode 100644 index 0000000..5982a19 --- /dev/null +++ b/0001-sandbox-linux-Implement-partial-support-for-ppc64-sy.patch @@ -0,0 +1,80 @@ +From 036d209a3f1a771de9aed31dfbe804aaf91d1c27 Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Thu, 9 Aug 2018 23:35:21 -0500 +Subject: [PATCH] sandbox/linux: Implement partial support for ppc64 syscalls + and ucontext + +Unlike other architectures, the ppc64 files currently rely on applicable +headers being provided by the system. It is sufficient for standard +GNU/Linux environments, but may require expansion elsewhere. +--- + sandbox/linux/BUILD.gn | 2 ++ + sandbox/linux/system_headers/linux_syscalls.h | 4 ++++ + sandbox/linux/system_headers/linux_ucontext.h | 2 ++ + sandbox/linux/system_headers/ppc64_linux_syscalls.h | 12 ++++++++++++ + sandbox/linux/system_headers/ppc64_linux_ucontext.h | 12 ++++++++++++ + 5 files changed, 32 insertions(+) + create mode 100644 sandbox/linux/system_headers/ppc64_linux_syscalls.h + create mode 100644 sandbox/linux/system_headers/ppc64_linux_ucontext.h + +Index: chromium-120.0.6099.71/sandbox/linux/BUILD.gn +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/BUILD.gn ++++ chromium-120.0.6099.71/sandbox/linux/BUILD.gn +@@ -383,6 +383,8 @@ component("sandbox_services") { + + source_set("sandbox_services_headers") { + sources = [ ++ "system_headers/ppc64_linux_syscalls.h", ++ "system_headers/ppc64_linux_ucontext.h", + "system_headers/arm64_linux_syscalls.h", + "system_headers/arm_linux_syscalls.h", + "system_headers/arm_linux_ucontext.h", +Index: chromium-120.0.6099.71/sandbox/linux/system_headers/linux_syscalls.h +=================================================================== +--- 
chromium-120.0.6099.71.orig/sandbox/linux/system_headers/linux_syscalls.h ++++ chromium-120.0.6099.71/sandbox/linux/system_headers/linux_syscalls.h +@@ -35,5 +35,9 @@ + #include "sandbox/linux/system_headers/arm64_linux_syscalls.h" + #endif + ++#if defined(__powerpc64__) ++#include "sandbox/linux/system_headers/ppc64_linux_syscalls.h" ++#endif ++ + #endif // SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_SYSCALLS_H_ + +Index: chromium-120.0.6099.71/sandbox/linux/system_headers/ppc64_linux_syscalls.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/sandbox/linux/system_headers/ppc64_linux_syscalls.h +@@ -0,0 +1,12 @@ ++// Copyright 2014 The Chromium Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_PPC64_LINUX_SYSCALLS_H_ ++#define SANDBOX_LINUX_SYSTEM_HEADERS_PPC64_LINUX_SYSCALLS_H_ ++ ++#include <asm/unistd.h> ++ ++//TODO: is it necessary to redefine syscall numbers for PPC64? ++ ++#endif // SANDBOX_LINUX_SYSTEM_HEADERS_PPC64_LINUX_SYSCALLS_H_ +Index: chromium-120.0.6099.71/sandbox/linux/system_headers/ppc64_linux_ucontext.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/sandbox/linux/system_headers/ppc64_linux_ucontext.h +@@ -0,0 +1,12 @@ ++// Copyright 2014 The Chromium Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_PPC64_LINUX_UCONTEXT_H_ ++#define SANDBOX_LINUX_SYSTEM_HEADERS_PPC64_LINUX_UCONTEXT_H_ ++ ++#include <sys/ucontext.h> ++ ++//TODO: is it necessary to redefine ucontext on PPC64?
++ ++#endif // SANDBOX_LINUX_SYSTEM_HEADERS_PPC64_LINUX_UCONTEXT_H_ diff --git a/0001-sandbox-linux-Update-IsSyscallAllowed-in-broker_proc.patch b/0001-sandbox-linux-Update-IsSyscallAllowed-in-broker_proc.patch new file mode 100644 index 0000000..ca511f5 --- /dev/null +++ b/0001-sandbox-linux-Update-IsSyscallAllowed-in-broker_proc.patch @@ -0,0 +1,22 @@ +From c41cd6ac927f592b161abc04468d3c7a4be91995 Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Tue, 23 Oct 2018 15:49:31 -0500 +Subject: [PATCH] sandbox/linux: Update IsSyscallAllowed in broker_process.cc + +--- + sandbox/linux/syscall_broker/broker_process.cc | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: chromium-120.0.6099.71/sandbox/linux/syscall_broker/broker_process.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/syscall_broker/broker_process.cc ++++ chromium-120.0.6099.71/sandbox/linux/syscall_broker/broker_process.cc +@@ -169,7 +169,7 @@ bool BrokerProcess::IsSyscallBrokerable( + #if defined(__NR_fstatat64) + case __NR_fstatat64: + #endif +-#if defined(__x86_64__) || defined(__aarch64__) ++#if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) + case __NR_newfstatat: + #endif + return !fast_check || policy_->allowed_command_set.test(COMMAND_STAT); diff --git a/0001-sandbox-linux-Update-syscall-helpers-lists-for-ppc64.patch b/0001-sandbox-linux-Update-syscall-helpers-lists-for-ppc64.patch new file mode 100644 index 0000000..70f27a2 --- /dev/null +++ b/0001-sandbox-linux-Update-syscall-helpers-lists-for-ppc64.patch @@ -0,0 +1,638 @@ +From da52663deec77f705d7d58b18484c3e28e563f10 Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Tue, 18 Sep 2018 18:39:28 -0500 +Subject: [PATCH] sandbox/linux: Update syscall helpers/lists for ppc64 + +--- + .../seccomp-bpf-helpers/baseline_policy.cc | 8 +- + .../syscall_parameters_restrictions.cc | 2 +- + .../syscall_parameters_restrictions.h | 2 +- + 
.../linux/seccomp-bpf-helpers/syscall_sets.cc | 108 ++++++++++-------- + .../linux/seccomp-bpf-helpers/syscall_sets.h | 6 +- + sandbox/linux/services/syscall_wrappers.cc | 2 +- + 6 files changed, 73 insertions(+), 55 deletions(-) + +Index: chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc ++++ chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc +@@ -90,7 +90,8 @@ bool IsBaselinePolicyWatched(int sysno) + SyscallSets::IsPrctl(sysno) || + SyscallSets::IsProcessGroupOrSession(sysno) || + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + SyscallSets::IsSocketCall(sysno) || + #endif + #if defined(__arm__) +@@ -255,7 +256,7 @@ ResultExpr EvaluateSyscallImpl(int fs_de + } + + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + if (sysno == __NR_mmap) + return RestrictMmapFlags(); + #endif +@@ -276,7 +277,7 @@ ResultExpr EvaluateSyscallImpl(int fs_de + return RestrictPrctl(); + + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + if (sysno == __NR_socketpair) { + // Only allow AF_UNIX, PF_UNIX. Crash if anything else is seen. 
+ static_assert(AF_UNIX == PF_UNIX, +@@ -340,7 +341,8 @@ ResultExpr EvaluateSyscallImpl(int fs_de + } + + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + if (SyscallSets::IsSocketCall(sysno)) + return RestrictSocketcallCommand(); + #endif +Index: chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc ++++ chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc +@@ -36,7 +36,7 @@ + #include "sandbox/linux/system_headers/linux_time.h" + + #if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) && \ +- !defined(__arm__) && !defined(__aarch64__) && \ ++ !defined(__arm__) && !defined(__aarch64__) && !defined(__powerpc64__) && \ + !defined(PTRACE_GET_THREAD_AREA) + // Also include asm/ptrace-abi.h since ptrace.h in older libc (for instance + // the one in Ubuntu 16.04 LTS) is missing PTRACE_GET_THREAD_AREA. +@@ -45,6 +45,11 @@ + #include <asm/ptrace-abi.h> + #endif + ++// On PPC64, TCGETS is defined in terms of struct termios, so we must include termios.h ++#ifdef __powerpc64__ ++#include <termios.h> ++#endif ++ + #if BUILDFLAG(IS_ANDROID) + + #if !defined(F_DUPFD_CLOEXEC) +@@ -102,6 +107,15 @@ inline bool IsArchitectureMips() { + #endif + } + ++inline bool IsArchitecturePPC64() { ++#if defined(__powerpc64__) ++ return true; ++#else ++ return false; ++#endif ++} ++ ++ + // Ubuntu's version of glibc has a race condition in sem_post that can cause + // it to call futex(2) with bogus op arguments. To workaround this, we need + // to allow those futex(2) calls to fail with EINVAL, instead of crashing the +@@ -269,9 +283,11 @@ ResultExpr RestrictFcntlCommands() { + // operator.
+ // Glibc overrides the kernel's O_LARGEFILE value. Account for this. + uint64_t kOLargeFileFlag = O_LARGEFILE; +- if (IsArchitectureX86_64() || IsArchitectureI386() || IsArchitectureMips()) ++ if (IsArchitectureX86_64() || IsArchitectureI386() || IsArchitectureMips() \ ++ || IsArchitecturePPC64()) + kOLargeFileFlag = 0100000; + ++ + const Arg cmd(1); + const Arg long_arg(2); + +@@ -294,8 +310,17 @@ ResultExpr RestrictFcntlCommands() { + F_SETLKW, + F_GETLK, + F_DUPFD, +- F_DUPFD_CLOEXEC}, +- Allow()) ++ F_DUPFD_CLOEXEC ++#if defined(__powerpc64__) ++// On PPC64, F_SETLK, F_GETLK, F_SETLKW are defined as the 64-bit variants ++// but glibc will sometimes still use the 32-bit versions. Allow both. ++ , ++ 5, /* F_GETLK (32) */ ++ 6, /* F_SETLK (32) */ ++ 7 /* F_SETLKW (32) */ ++#endif ++ }, ++ Allow()) + .Case(F_SETFL, + If((long_arg & ~kAllowedMask) == 0, Allow()).Else(CrashSIGSYS())) + .Case(F_ADD_SEALS, +@@ -304,7 +329,7 @@ ResultExpr RestrictFcntlCommands() { + // clang-format on + } + +-#if defined(__i386__) || defined(__mips__) ++#if defined(__i386__) || defined(__mips__) || defined(__powerpc64__) + ResultExpr RestrictSocketcallCommand() { + // Unfortunately, we are unable to restrict the first parameter to + // socketpair(2). 
Whilst initially sounding bad, it's noteworthy that very +@@ -459,7 +484,7 @@ ResultExpr RestrictPtrace() { + #endif + return Switch(request) + .Cases({ +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__powerpc64__) + PTRACE_GETREGS, PTRACE_GETFPREGS, PTRACE_GET_THREAD_AREA, + PTRACE_GETREGSET, + #endif +Index: chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h ++++ chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h +@@ -52,7 +52,7 @@ SANDBOX_EXPORT bpf_dsl::ResultExpr Restr + // O_NONBLOCK | O_SYNC | O_LARGEFILE | O_CLOEXEC | O_NOATIME. + SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictFcntlCommands(); + +-#if defined(__i386__) || defined(__mips__) ++#if defined(__i386__) || defined(__mips__) || defined(__powerpc64__) + // Restrict socketcall(2) to only allow socketpair(2), send(2), recv(2), + // sendto(2), recvfrom(2), shutdown(2), sendmsg(2) and recvmsg(2). 
+ SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictSocketcallCommand(); +Index: chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc ++++ chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc +@@ -29,7 +29,8 @@ bool SyscallSets::IsAllowedGettime(int s + switch (sysno) { + case __NR_gettimeofday: + #if defined(__i386__) || defined(__x86_64__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_time: + #endif + return true; +@@ -52,12 +53,14 @@ bool SyscallSets::IsAllowedGettime(int s + case __NR_clock_nanosleep_time64: // Parameters filtered by RestrictClockID(). + #endif + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_ftime: // Obsolete. + #endif + case __NR_settimeofday: // Privileged. + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_stime: + #endif + default: +@@ -136,7 +139,7 @@ bool SyscallSets::IsFileSystem(int sysno + case __NR_faccessat2: + case __NR_fchmodat: + case __NR_fchownat: // Should be called chownat ? +-#if defined(__x86_64__) || defined(__aarch64__) ++#if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) + case __NR_newfstatat: // fstatat(). EPERM not a valid errno. 
+ #elif defined(__i386__) || defined(__arm__) || \ + (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) +@@ -155,7 +158,7 @@ bool SyscallSets::IsFileSystem(int sysno + case __NR_memfd_create: + case __NR_mkdirat: + case __NR_mknodat: +-#if defined(__i386__) ++#if defined(__i386__) || defined(__powerpc64__) + case __NR_oldlstat: + case __NR_oldstat: + #endif +@@ -169,7 +172,8 @@ bool SyscallSets::IsFileSystem(int sysno + #endif + case __NR_statfs: // EPERM not a valid errno. + #if defined(__i386__) || defined(__arm__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_statfs64: + #endif + case __NR_statx: // EPERM not a valid errno. +@@ -180,7 +184,8 @@ bool SyscallSets::IsFileSystem(int sysno + case __NR_truncate64: + #endif + case __NR_unlinkat: +-#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) ++#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ ++ defined(__powerpc64__) + case __NR_utime: + #endif + case __NR_utimensat: // New. +@@ -220,7 +225,8 @@ bool SyscallSets::IsAllowedFileSystemAcc + #endif + return true; + // TODO(jln): these should be denied gracefully as well (moved below). +-#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) ++#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ ++ defined(__powerpc64__) + case __NR_fadvise64: // EPERM not a valid errno. + #endif + #if defined(__i386__) +@@ -233,11 +239,12 @@ bool SyscallSets::IsAllowedFileSystemAcc + case __NR_flock: // EPERM not a valid errno. + case __NR_fstatfs: // Give information about the whole filesystem. + #if defined(__i386__) || defined(__arm__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_fstatfs64: + #endif + case __NR_fsync: // EPERM not a valid errno. 
+-#if defined(__i386__) ++#if defined(__i386__) || defined(__powerpc64__) + case __NR_oldfstat: + #endif + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +@@ -245,6 +252,8 @@ bool SyscallSets::IsAllowedFileSystemAcc + case __NR_sync_file_range: // EPERM not a valid errno. + #elif defined(__arm__) + case __NR_arm_sync_file_range: // EPERM not a valid errno. ++#elif defined(__powerpc64__) ++ case __NR_sync_file_range2: // EPERM not a valid errno. + #endif + default: + return false; +@@ -265,7 +274,8 @@ bool SyscallSets::IsDeniedFileSystemAcce + #endif + case __NR_getdents64: // EPERM not a valid errno. + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_readdir: + #endif + return true; +@@ -306,7 +316,7 @@ bool SyscallSets::IsGetSimpleId(int sysn + bool SyscallSets::IsProcessPrivilegeChange(int sysno) { + switch (sysno) { + case __NR_capset: +-#if defined(__i386__) || defined(__x86_64__) ++#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc64__) + case __NR_ioperm: // Intel privilege. + case __NR_iopl: // Intel privilege. + #endif +@@ -362,7 +372,8 @@ bool SyscallSets::IsAllowedSignalHandlin + // overflow. 
+ case __NR_sigaltstack: + #if defined(__i386__) || defined(__arm__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_rt_sigtimedwait_time64: + case __NR_sigaction: + case __NR_sigprocmask: +@@ -378,7 +389,8 @@ bool SyscallSets::IsAllowedSignalHandlin + #endif + case __NR_signalfd4: + #if defined(__i386__) || defined(__arm__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_sigpending: + case __NR_sigsuspend: + #endif +@@ -402,7 +414,7 @@ bool SyscallSets::IsAllowedOperationOnFd + #endif + case __NR_dup3: + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + case __NR_shutdown: + #endif + return true; +@@ -435,7 +447,7 @@ bool SyscallSets::IsAllowedProcessStartO + case __NR_exit_group: + case __NR_wait4: + case __NR_waitid: +-#if defined(__i386__) ++#if defined(__i386__) || defined(__powerpc64__) + case __NR_waitpid: + #endif + return true; +@@ -452,7 +464,7 @@ bool SyscallSets::IsAllowedProcessStartO + #endif + case __NR_set_tid_address: + case __NR_unshare: +-#if !defined(__mips__) && !defined(__aarch64__) ++#if !defined(__mips__) && !defined(__aarch64__) || defined(__powerpc64__) + case __NR_vfork: + #endif + default: +@@ -499,7 +511,7 @@ bool SyscallSets::IsAllowedEpoll(int sys + bool SyscallSets::IsDeniedGetOrModifySocket(int sysno) { + switch (sysno) { + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + case __NR_accept: + case __NR_accept4: + case __NR_bind: +@@ -514,7 +526,8 @@ bool SyscallSets::IsDeniedGetOrModifySoc + } + + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ 
(defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + // Big multiplexing system call for sockets. + bool SyscallSets::IsSocketCall(int sysno) { + switch (sysno) { +@@ -528,7 +541,8 @@ bool SyscallSets::IsSocketCall(int sysno + } + #endif + +-#if defined(__x86_64__) || defined(__arm__) || defined(__mips__) ++#if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ ++ defined(__powerpc64__) + bool SyscallSets::IsNetworkSocketInformation(int sysno) { + switch (sysno) { + case __NR_getpeername: +@@ -553,7 +567,7 @@ bool SyscallSets::IsAllowedAddressSpaceA + case __NR_mincore: + case __NR_mlockall: + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + case __NR_mmap: + #endif + #if defined(__i386__) || defined(__arm__) || \ +@@ -583,7 +597,8 @@ bool SyscallSets::IsAllowedGeneralIo(int + switch (sysno) { + case __NR_lseek: + #if defined(__i386__) || defined(__arm__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR__llseek: + #endif + #if !defined(__aarch64__) +@@ -603,26 +618,28 @@ bool SyscallSets::IsAllowedGeneralIo(int + case __NR_readv: + case __NR_pread64: + #if defined(__arm__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_recv: + #endif + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + case __NR_recvfrom: // Could specify source. + case __NR_recvmsg: // Could specify source. 
+ #endif +-#if defined(__i386__) || defined(__x86_64__) ++#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc64__) + case __NR_select: + #endif +-#if defined(__i386__) || defined(__arm__) || defined(__mips__) ++#if defined(__i386__) || defined(__arm__) || defined(__mips__) || defined(__powerpc64__) + case __NR__newselect: + #endif + #if defined(__arm__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_send: + #endif + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + case __NR_sendmsg: // Could specify destination. + case __NR_sendto: // Could specify destination. + #endif +@@ -678,7 +695,8 @@ bool SyscallSets::IsAllowedBasicSchedule + return true; + case __NR_getpriority: + #if defined(__i386__) || defined(__arm__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_nice: + #endif + case __NR_setpriority: +@@ -690,7 +708,8 @@ bool SyscallSets::IsAllowedBasicSchedule + bool SyscallSets::IsAdminOperation(int sysno) { + switch (sysno) { + #if defined(__i386__) || defined(__arm__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_bdflush: + #endif + case __NR_kexec_load: +@@ -706,7 +725,8 @@ bool SyscallSets::IsAdminOperation(int s + + bool SyscallSets::IsKernelModule(int sysno) { + switch (sysno) { +-#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) ++#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ ++ defined(__powerpc64__) + case __NR_create_module: + case __NR_get_kernel_syms: // Should ENOSYS. 
+ case __NR_query_module: +@@ -739,7 +759,8 @@ bool SyscallSets::IsFsControl(int sysno) + case __NR_swapoff: + case __NR_swapon: + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_umount: + #endif + case __NR_umount2: +@@ -755,7 +776,7 @@ bool SyscallSets::IsNuma(int sysno) { + case __NR_getcpu: + case __NR_mbind: + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + case __NR_migrate_pages: + #endif + case __NR_move_pages: +@@ -790,14 +811,15 @@ bool SyscallSets::IsGlobalProcessEnviron + switch (sysno) { + case __NR_acct: // Privileged. + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + case __NR_getrlimit: + #endif +-#if defined(__i386__) || defined(__arm__) ++#if defined(__i386__) || defined(__arm__) || defined(__powerpc64__) + case __NR_ugetrlimit: + #endif + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_ulimit: + #endif + case __NR_getrusage: +@@ -831,7 +853,7 @@ bool SyscallSets::IsGlobalSystemStatus(i + #endif + case __NR_sysinfo: + case __NR_uname: +-#if defined(__i386__) ++#if defined(__i386__) || defined(__powerpc64__) + case __NR_olduname: + case __NR_oldolduname: + #endif +@@ -915,7 +937,8 @@ bool SyscallSets::IsSystemVSemaphores(in + + #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \ + defined(__aarch64__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) || \ ++ defined(__powerpc64__) + // These give a lot of ambient authority and bypass the setuid sandbox. 
+ bool SyscallSets::IsSystemVSharedMemory(int sysno) { + switch (sysno) { +@@ -946,7 +969,8 @@ bool SyscallSets::IsSystemVMessageQueue( + #endif + + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + // Big system V multiplexing system call. + bool SyscallSets::IsSystemVIpc(int sysno) { + switch (sysno) { +@@ -966,7 +990,8 @@ bool SyscallSets::IsAnySystemV(int sysno + return IsSystemVMessageQueue(sysno) || IsSystemVSemaphores(sysno) || + IsSystemVSharedMemory(sysno); + #elif defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + return IsSystemVIpc(sysno); + #endif + } +@@ -1023,7 +1048,8 @@ bool SyscallSets::IsFaNotify(int sysno) + bool SyscallSets::IsTimer(int sysno) { + switch (sysno) { + case __NR_getitimer: +-#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) ++#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ ++ defined(__powerpc64__) + case __NR_alarm: + #endif + case __NR_setitimer: +@@ -1105,18 +1131,22 @@ bool SyscallSets::IsMisc(int sysno) { + case __NR_syncfs: + case __NR_vhangup: + // The system calls below are not implemented. 
+-#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) ++#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ ++ defined(__powerpc64__) + case __NR_afs_syscall: + #endif + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_break: + #endif +-#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) ++#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ ++ defined(__powerpc64__) + case __NR_getpmsg: + #endif + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_gtty: + case __NR_idle: + case __NR_lock: +@@ -1124,20 +1154,22 @@ bool SyscallSets::IsMisc(int sysno) { + case __NR_prof: + case __NR_profil: + #endif +-#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) ++#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ ++ defined(__powerpc64__) + case __NR_putpmsg: + #endif + #if defined(__x86_64__) + case __NR_security: + #endif + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + case __NR_stty: + #endif +-#if defined(__x86_64__) ++#if defined(__x86_64__) || defined(__powerpc64__) + case __NR_tuxcall: + #endif +-#if !defined(__aarch64__) ++#if !defined(__aarch64__) && !defined(__powerpc64__) + case __NR_vserver: + #endif + return true; +Index: chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/syscall_sets.h +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/seccomp-bpf-helpers/syscall_sets.h ++++ chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/syscall_sets.h +@@ -46,13 +46,14 @@ 
class SANDBOX_EXPORT SyscallSets { + static bool IsDeniedGetOrModifySocket(int sysno); + + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + // Big multiplexing system call for sockets. + static bool IsSocketCall(int sysno); + #endif + + #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + static bool IsNetworkSocketInformation(int sysno); + #endif + +@@ -84,7 +85,8 @@ class SANDBOX_EXPORT SyscallSets { + #endif + #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \ + defined(__aarch64__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_64_BITS)) || \ ++ defined(__powerpc64__) + // These give a lot of ambient authority and bypass the setuid sandbox. + static bool IsSystemVSharedMemory(int sysno); + #endif +@@ -95,7 +97,8 @@ class SANDBOX_EXPORT SyscallSets { + #endif + + #if defined(__i386__) || \ +- (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) ++ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ ++ defined(__powerpc64__) + // Big system V multiplexing system call. 
static bool IsSystemVIpc(int sysno); + #endif +Index: chromium-120.0.6099.71/sandbox/linux/services/syscall_wrappers.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/services/syscall_wrappers.cc ++++ chromium-120.0.6099.71/sandbox/linux/services/syscall_wrappers.cc +@@ -61,7 +61,7 @@ long sys_clone(unsigned long flags, + #if defined(ARCH_CPU_X86_64) + return syscall(__NR_clone, flags, child_stack, ptid, ctid, tls); + #elif defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARM_FAMILY) || \ +- defined(ARCH_CPU_MIPS_FAMILY) ++ defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_PPC64_FAMILY) + // CONFIG_CLONE_BACKWARDS defined. + return syscall(__NR_clone, flags, child_stack, ptid, tls, ctid); + #endif diff --git a/0001-sandbox-linux-bpf_dsl-Update-syscall-ranges-for-ppc6.patch b/0001-sandbox-linux-bpf_dsl-Update-syscall-ranges-for-ppc6.patch new file mode 100644 index 0000000..28a47cc --- /dev/null +++ b/0001-sandbox-linux-bpf_dsl-Update-syscall-ranges-for-ppc6.patch @@ -0,0 +1,27 @@ +From c9043a422fb4a5a6c72aaa1b907cea5f6a3061dd Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Thu, 9 Aug 2018 19:10:24 -0500 +Subject: [PATCH 1/4] sandbox/linux/bpf_dsl: Update syscall ranges for ppc64 + +--- + sandbox/linux/bpf_dsl/linux_syscall_ranges.h | 7 +++++++ + 1 file changed, 7 insertions(+) + +Index: chromium-120.0.6099.71/sandbox/linux/bpf_dsl/linux_syscall_ranges.h +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/bpf_dsl/linux_syscall_ranges.h ++++ chromium-120.0.6099.71/sandbox/linux/bpf_dsl/linux_syscall_ranges.h +@@ -56,6 +56,13 @@ + #define MAX_PUBLIC_SYSCALL __NR_syscalls + #define MAX_SYSCALL MAX_PUBLIC_SYSCALL + ++#elif defined(__powerpc64__) ++ ++#include <asm/unistd.h> ++#define MIN_SYSCALL 0u ++#define MAX_PUBLIC_SYSCALL 386u ++#define MAX_SYSCALL MAX_PUBLIC_SYSCALL ++ + #else + #error "Unsupported architecture" + #endif diff --git
a/0001-services-service_manager-sandbox-linux-Fix-TCGETS-de.patch b/0001-services-service_manager-sandbox-linux-Fix-TCGETS-de.patch new file mode 100644 index 0000000..4e50452 --- /dev/null +++ b/0001-services-service_manager-sandbox-linux-Fix-TCGETS-de.patch @@ -0,0 +1,26 @@ +From 7468b266532bd607eb1f5292d758256d800b2eee Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Fri, 10 Aug 2018 00:23:50 -0500 +Subject: [PATCH] services/service_manager/sandbox/linux: Fix TCGETS + declaration on PPC64 + +--- + .../sandbox/linux/bpf_renderer_policy_linux.cc | 5 +++++ + 1 file changed, 5 insertions(+) + +Index: chromium-120.0.6099.71/sandbox/policy/linux/bpf_renderer_policy_linux.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/policy/linux/bpf_renderer_policy_linux.cc ++++ chromium-120.0.6099.71/sandbox/policy/linux/bpf_renderer_policy_linux.cc +@@ -15,6 +15,11 @@ + #include "sandbox/linux/system_headers/linux_syscalls.h" + #include "sandbox/policy/linux/sandbox_linux.h" + ++// On PPC64, TCGETS is defined in terms of struct termios, so we must include termios.h ++#ifdef __powerpc64__ ++#include <termios.h> ++#endif ++ + // TODO(vignatti): replace the local definitions below with #include + // <linux/kcmp.h> once kernel version 4.6 becomes widely used.
+ #include diff --git a/0001-third_party-angle-Include-missing-header-cstddef-in-.patch b/0001-third_party-angle-Include-missing-header-cstddef-in-.patch new file mode 100644 index 0000000..96a3bb8 --- /dev/null +++ b/0001-third_party-angle-Include-missing-header-cstddef-in-.patch @@ -0,0 +1,12 @@ +Index: chromium-120.0.6099.71/third_party/angle/src/libANGLE/Constants.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/angle/src/libANGLE/Constants.h ++++ chromium-120.0.6099.71/third_party/angle/src/libANGLE/Constants.h +@@ -9,6 +9,7 @@ + #ifndef LIBANGLE_CONSTANTS_H_ + #define LIBANGLE_CONSTANTS_H_ + ++#include <cstddef> + #include "common/platform.h" + + #include diff --git a/0001-third_party-libvpx-Properly-generate-gni-on-ppc64.patch b/0001-third_party-libvpx-Properly-generate-gni-on-ppc64.patch new file mode 100644 index 0000000..5bb9538 --- /dev/null +++ b/0001-third_party-libvpx-Properly-generate-gni-on-ppc64.patch @@ -0,0 +1,23 @@ +From cc613c2e3bac8d4d1ff153700b819f964435923a Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Tue, 4 Sep 2018 18:16:07 -0500 +Subject: [PATCH] third_party/libvpx: Properly generate gni on ppc64 + +--- + third_party/libvpx/BUILD.gn | 2 ++ + third_party/libvpx/generate_gni.sh | 10 ++++++++++ + 2 files changed, 12 insertions(+) + +Index: chromium-120.0.6099.71/third_party/libvpx/BUILD.gn +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/libvpx/BUILD.gn ++++ chromium-120.0.6099.71/third_party/libvpx/BUILD.gn +@@ -248,6 +248,8 @@ if (current_cpu == "x86" || (current_cpu + } else if (current_cpu == "x64") { + deps = [ ":libvpx_x86_64_headers" ] + sources = libvpx_srcs_x86_64_avx512 ++ } else if (current_cpu == "ppc64") { ++ sources = libvpx_srcs_ppc64 + } + } + } diff --git a/0001-third_party-lss-Don-t-look-for-mmap2-on-ppc64.patch b/0001-third_party-lss-Don-t-look-for-mmap2-on-ppc64.patch new file mode 100644
index 0000000..d906af8 --- /dev/null +++ b/0001-third_party-lss-Don-t-look-for-mmap2-on-ppc64.patch @@ -0,0 +1,22 @@ +Index: chromium-120.0.6099.71/third_party/lss/linux_syscall_support.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/lss/linux_syscall_support.h ++++ chromium-120.0.6099.71/third_party/lss/linux_syscall_support.h +@@ -4638,7 +4638,7 @@ struct kernel_statx { + LSS_REG(2, buf); + LSS_BODY(void*, mmap2, "0"(__r2)); + } +-#else ++#elif !defined(__powerpc64__) /* ppc64 doesn't have mmap2 */ + #define __NR__mmap2 __NR_mmap2 + LSS_INLINE _syscall6(void*, _mmap2, void*, s, + size_t, l, int, p, +@@ -4749,7 +4749,7 @@ struct kernel_statx { + #if defined(__i386__) || \ + defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ + (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ +- defined(__PPC__) || \ ++ (defined(__PPC__) && !defined(__powerpc64__)) || \ + (defined(__s390__) && !defined(__s390x__)) + /* On these architectures, implement mmap() with mmap2(). 
*/ + LSS_INLINE void* LSS_NAME(mmap)(void *s, size_t l, int p, int f, int d, diff --git a/0001-third_party-pffft-Include-altivec.h-on-ppc64-with-SI.patch b/0001-third_party-pffft-Include-altivec.h-on-ppc64-with-SI.patch new file mode 100644 index 0000000..da8872a --- /dev/null +++ b/0001-third_party-pffft-Include-altivec.h-on-ppc64-with-SI.patch @@ -0,0 +1,22 @@ +From 63e63a70766f3059ce5f1d06a95988ecf909b298 Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Wed, 24 Apr 2019 21:17:23 -0500 +Subject: [PATCH] third_party/pffft: Include altivec.h on ppc64 with SIMD + enabled + +--- + third_party/pffft/src/pffft.c | 1 + + 1 file changed, 1 insertion(+) + +Index: chromium-120.0.6099.71/third_party/pffft/src/pffft.c +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/pffft/src/pffft.c ++++ chromium-120.0.6099.71/third_party/pffft/src/pffft.c +@@ -100,6 +100,7 @@ + Altivec support macros + */ + #if !defined(PFFFT_SIMD_DISABLE) && (defined(__ppc__) || defined(__ppc64__)) ++#include + typedef vector float v4sf; + # define SIMD_SZ 4 + # define VZERO() ((vector float) vec_splat_u8(0)) diff --git a/0002-Add-ppc64-trap-instructions.patch b/0002-Add-ppc64-trap-instructions.patch new file mode 100644 index 0000000..063072b --- /dev/null +++ b/0002-Add-ppc64-trap-instructions.patch @@ -0,0 +1,16 @@ +--- a/v8/src/base/immediate-crash.h ++++ b/v8/src/base/immediate-crash.h +@@ -93,6 +93,13 @@ + #define TRAP_SEQUENCE1_() asm volatile(".2byte 0x0001"); + #define TRAP_SEQUENCE2_() asm volatile("") + ++#elif V8_HOST_ARCH_PPC64 ++ ++#define TRAP_SEQUENCE1_() asm volatile("trap") ++// Intentionally empty: __builtin_unreachable() is always part of the sequence ++// (see IMMEDIATE_CRASH below) ++#define TRAP_SEQUENCE2_() asm volatile("") ++ + #else + + // Crash report accuracy will not be guaranteed on other architectures, but at diff --git a/0002-Highway-disable-128-bit-vsx.patch b/0002-Highway-disable-128-bit-vsx.patch new 
file mode 100644 index 0000000..3b1e7e7 --- /dev/null +++ b/0002-Highway-disable-128-bit-vsx.patch @@ -0,0 +1,101 @@ +Index: chromium-120.0.6099.71/third_party/highway/src/hwy/ops/ppc_vsx-inl.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/highway/src/hwy/ops/ppc_vsx-inl.h ++++ chromium-120.0.6099.71/third_party/highway/src/hwy/ops/ppc_vsx-inl.h +@@ -36,6 +36,8 @@ + + #include "hwy/ops/shared-inl.h" + ++#undef __SIZEOF_INT128__ ++ + HWY_BEFORE_NAMESPACE(); + namespace hwy { + namespace HWY_NAMESPACE { +@@ -2844,7 +2846,6 @@ struct CompressIsPartition { + + namespace detail { + +-#if HWY_TARGET > HWY_PPC10 || __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ + // fallback for missing vec_extractm + template + HWY_INLINE uint64_t ExtractSignBits(Vec128 sign_bits, +@@ -2857,22 +2858,16 @@ HWY_INLINE uint64_t ExtractSignBits(Vec1 + return extracted.raw[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__]; + } + +-#endif // HWY_TARGET > HWY_PPC10 +- + template + HWY_INLINE uint64_t BitsFromMask(hwy::SizeTag<1> /*tag*/, + Mask128 mask) { + const DFromM d; + const Repartition du8; + const VFromD sign_bits = BitCast(du8, VecFromMask(d, mask)); +-#if HWY_TARGET <= HWY_PPC10 && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +- return static_cast(vec_extractm(sign_bits.raw)); +-#else + const __vector unsigned char kBitShuffle = { + 120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 0 + }; + return ExtractSignBits(sign_bits, kBitShuffle); +-#endif // HWY_TARGET <= HWY_PPC10 + } + + template +@@ -2882,10 +2877,6 @@ HWY_INLINE uint64_t BitsFromMask(hwy::Si + const Repartition du8; + const VFromD sign_bits = BitCast(du8, VecFromMask(d, mask)); + +-#if HWY_TARGET <= HWY_PPC10 && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +- const RebindToUnsigned du; +- return static_cast(vec_extractm(BitCast(du, sign_bits).raw)); +-#else + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + const __vector unsigned char kBitShuffle = { + 112, 96, 
80, 64, 48, 32, 16, 0, 128, 128, 128, 128, 128, 128, 128, 128}; +@@ -2894,7 +2885,6 @@ HWY_INLINE uint64_t BitsFromMask(hwy::Si + 128, 128, 128, 128, 128, 128, 128, 128, 112, 96, 80, 64, 48, 32, 16, 0}; + #endif + return ExtractSignBits(sign_bits, kBitShuffle); +-#endif // HWY_TARGET <= HWY_PPC10 + } + + template +@@ -2903,10 +2893,6 @@ HWY_INLINE uint64_t BitsFromMask(hwy::Si + const DFromM d; + const Repartition du8; + const VFromD sign_bits = BitCast(du8, VecFromMask(d, mask)); +-#if HWY_TARGET <= HWY_PPC10 && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +- const RebindToUnsigned du; +- return static_cast(vec_extractm(BitCast(du, sign_bits).raw)); +-#else + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + const __vector unsigned char kBitShuffle = { + 96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}; +@@ -2915,7 +2901,6 @@ HWY_INLINE uint64_t BitsFromMask(hwy::Si + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 96, 64, 32, 0}; + #endif + return ExtractSignBits(sign_bits, kBitShuffle); +-#endif // HWY_TARGET <= HWY_PPC10 + } + + template +@@ -2924,10 +2909,6 @@ HWY_INLINE uint64_t BitsFromMask(hwy::Si + const DFromM d; + const Repartition du8; + const VFromD sign_bits = BitCast(du8, VecFromMask(d, mask)); +-#if HWY_TARGET <= HWY_PPC10 && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +- const RebindToUnsigned du; +- return static_cast(vec_extractm(BitCast(du, sign_bits).raw)); +-#else + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + const __vector unsigned char kBitShuffle = {64, 0, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, +@@ -2938,7 +2919,6 @@ HWY_INLINE uint64_t BitsFromMask(hwy::Si + 128, 128, 64, 0}; + #endif + return ExtractSignBits(sign_bits, kBitShuffle); +-#endif // HWY_TARGET <= HWY_PPC10 + } + + // Returns the lowest N of the mask bits. 
diff --git a/0002-Include-cstddef-to-fix-build.patch b/0002-Include-cstddef-to-fix-build.patch new file mode 100644 index 0000000..927ac8b --- /dev/null +++ b/0002-Include-cstddef-to-fix-build.patch @@ -0,0 +1,21 @@ +From 82922bf486e9926a171152f61030dfcd53f017b8 Mon Sep 17 00:00:00 2001 +From: Timothy Pearson +Date: Thu, 30 Aug 2018 17:32:05 -0500 +Subject: [PATCH] Include cstddef to fix build + +size_t is not defined unless cstddef is included. +--- + third_party/crashpad/crashpad/compat/linux/sys/user.h | 1 + + +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/compat/linux/sys/user.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/compat/linux/sys/user.h ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/compat/linux/sys/user.h +@@ -15,6 +15,7 @@ + #ifndef CRASHPAD_COMPAT_LINUX_SYS_USER_H_ + #define CRASHPAD_COMPAT_LINUX_SYS_USER_H_ + ++#include + #include_next + + #include diff --git a/0002-sandbox-linux-bpf_dsl-Modify-seccomp_macros-to-add-s.patch b/0002-sandbox-linux-bpf_dsl-Modify-seccomp_macros-to-add-s.patch new file mode 100644 index 0000000..1847fd0 --- /dev/null +++ b/0002-sandbox-linux-bpf_dsl-Modify-seccomp_macros-to-add-s.patch @@ -0,0 +1,76 @@ +From 3c4705bfdda45feb860a1f121631773e5fe8e53f Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Thu, 9 Aug 2018 19:11:26 -0500 +Subject: [PATCH 1/4] sandbox/linux/bpf_dsl: Modify seccomp_macros to add + support for ppc64 + +--- + sandbox/linux/bpf_dsl/seccomp_macros.h | 43 ++++++++++++++++++++++++++ + 1 file changed, 43 insertions(+) + +Index: chromium-120.0.6099.71/sandbox/linux/bpf_dsl/seccomp_macros.h +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/bpf_dsl/seccomp_macros.h ++++ chromium-120.0.6099.71/sandbox/linux/bpf_dsl/seccomp_macros.h +@@ -14,6 +14,9 @@ + #if defined(__mips__) + // sys/user.h in eglibc misses size_t definition + 
#include ++#elif defined(__powerpc64__) ++// Manually define greg_t on ppc64 ++typedef unsigned long long greg_t; + #endif + #endif + +@@ -343,6 +346,51 @@ struct regs_struct { + #define SECCOMP_PT_PARM4(_regs) (_regs).regs[3] + #define SECCOMP_PT_PARM5(_regs) (_regs).regs[4] + #define SECCOMP_PT_PARM6(_regs) (_regs).regs[5] ++ ++#elif defined(__powerpc64__) ++#include ++ ++typedef struct pt_regs regs_struct; ++ ++#ifdef ARCH_CPU_LITTLE_ENDIAN ++#define SECCOMP_ARCH AUDIT_ARCH_PPC64LE ++#else ++#define SECCOMP_ARCH AUDIT_ARCH_PPC64 ++#endif ++ ++#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.regs->gpr[_reg]) ++ ++#define SECCOMP_RESULT(_ctx) SECCOMP_REG(_ctx, 3) ++#define SECCOMP_SYSCALL(_ctx) SECCOMP_REG(_ctx, 0) ++#define SECCOMP_IP(_ctx) (_ctx)->uc_mcontext.regs->nip ++#define SECCOMP_PARM1(_ctx) SECCOMP_REG(_ctx, 3) ++#define SECCOMP_PARM2(_ctx) SECCOMP_REG(_ctx, 4) ++#define SECCOMP_PARM3(_ctx) SECCOMP_REG(_ctx, 5) ++#define SECCOMP_PARM4(_ctx) SECCOMP_REG(_ctx, 6) ++#define SECCOMP_PARM5(_ctx) SECCOMP_REG(_ctx, 7) ++#define SECCOMP_PARM6(_ctx) SECCOMP_REG(_ctx, 8) ++ ++#define SECCOMP_NR_IDX (offsetof(struct arch_seccomp_data, nr)) ++#define SECCOMP_ARCH_IDX (offsetof(struct arch_seccomp_data, arch)) ++#define SECCOMP_IP_MSB_IDX \ ++ (offsetof(struct arch_seccomp_data, instruction_pointer) + 4) ++#define SECCOMP_IP_LSB_IDX \ ++ (offsetof(struct arch_seccomp_data, instruction_pointer) + 0) ++#define SECCOMP_ARG_MSB_IDX(nr) \ ++ (offsetof(struct arch_seccomp_data, args) + 8 * (nr) + 4) ++#define SECCOMP_ARG_LSB_IDX(nr) \ ++ (offsetof(struct arch_seccomp_data, args) + 8 * (nr) + 0) ++ ++#define SECCOMP_PT_RESULT(_regs) (_regs).gpr[3] ++#define SECCOMP_PT_SYSCALL(_regs) (_regs).gpr[0] ++#define SECCOMP_PT_IP(_regs) (_regs).nip ++#define SECCOMP_PT_PARM1(_regs) (_regs).gpr[3] ++#define SECCOMP_PT_PARM2(_regs) (_regs).gpr[4] ++#define SECCOMP_PT_PARM3(_regs) (_regs).gpr[5] ++#define SECCOMP_PT_PARM4(_regs) (_regs).gpr[6] ++#define SECCOMP_PT_PARM5(_regs) 
(_regs).gpr[7] ++#define SECCOMP_PT_PARM6(_regs) (_regs).gpr[8] ++ + #else + #error Unsupported target platform + diff --git a/0002-third-party-boringssl-add-generated-files.patch b/0002-third-party-boringssl-add-generated-files.patch new file mode 100644 index 0000000..b4f8d28 --- /dev/null +++ b/0002-third-party-boringssl-add-generated-files.patch @@ -0,0 +1,5713 @@ +Index: chromium-120.0.6099.71/third_party/boringssl/BUILD.generated.gni +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/boringssl/BUILD.generated.gni ++++ chromium-120.0.6099.71/third_party/boringssl/BUILD.generated.gni +@@ -94,6 +94,7 @@ crypto_sources = [ + "src/crypto/cpu_arm_linux.c", + "src/crypto/cpu_arm_linux.h", + "src/crypto/cpu_intel.c", ++ "src/crypto/cpu_ppc64le.c", + "src/crypto/crypto.c", + "src/crypto/curve25519/curve25519.c", + "src/crypto/curve25519/curve25519_64_adx.c", +@@ -389,6 +390,9 @@ crypto_sources_asm = [ + "linux-arm/crypto/fipsmodule/sha512-armv4-linux.S", + "linux-arm/crypto/fipsmodule/vpaes-armv7-linux.S", + "linux-arm/crypto/test/trampoline-armv4-linux.S", ++ "linux-ppc64le/crypto/fipsmodule/aesp8-ppc-linux.S", ++ "linux-ppc64le/crypto/fipsmodule/ghashp8-ppc-linux.S", ++ "linux-ppc64le/crypto/test/trampoline-ppc-linux.S", + "linux-x86/crypto/chacha/chacha-x86-linux.S", + "linux-x86/crypto/fipsmodule/aesni-x86-linux.S", + "linux-x86/crypto/fipsmodule/bn-586-linux.S", +Index: chromium-120.0.6099.71/third_party/boringssl/linux-ppc64le/crypto/fipsmodule/aesp8-ppc-linux.S +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/boringssl/linux-ppc64le/crypto/fipsmodule/aesp8-ppc-linux.S +@@ -0,0 +1,3673 @@ ++// This file is generated from a similarly-named Perl script in the BoringSSL ++// source tree. Do not edit by hand. 
++ ++#if defined(__has_feature) ++#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) ++#define OPENSSL_NO_ASM ++#endif ++#endif ++ ++#if !defined(OPENSSL_NO_ASM) && defined(__powerpc64__) && defined(__ELF__) ++.machine "any" ++ ++.abiversion 2 ++.text ++ ++.align 7 ++.Lrcon: ++.byte 0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01 ++.byte 0x00,0x00,0x00,0x1b,0x00,0x00,0x00,0x1b,0x00,0x00,0x00,0x1b,0x00,0x00,0x00,0x1b ++.byte 0x0c,0x0f,0x0e,0x0d,0x0c,0x0f,0x0e,0x0d,0x0c,0x0f,0x0e,0x0d,0x0c,0x0f,0x0e,0x0d ++.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 ++.Lconsts: ++ mflr 0 ++ bcl 20,31,$+4 ++ mflr 6 ++ addi 6,6,-0x48 ++ mtlr 0 ++ blr ++.long 0 ++.byte 0,12,0x14,0,0,0,0,0 ++.byte 65,69,83,32,102,111,114,32,80,111,119,101,114,73,83,65,32,50,46,48,55,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 ++.align 2 ++ ++.globl aes_hw_set_encrypt_key ++.type aes_hw_set_encrypt_key,@function ++.align 5 ++aes_hw_set_encrypt_key: ++.localentry aes_hw_set_encrypt_key,0 ++ ++.Lset_encrypt_key: ++ mflr 11 ++ std 11,16(1) ++ ++ li 6,-1 ++ cmpldi 3,0 ++ beq- .Lenc_key_abort ++ cmpldi 5,0 ++ beq- .Lenc_key_abort ++ li 6,-2 ++ cmpwi 4,128 ++ blt- .Lenc_key_abort ++ cmpwi 4,256 ++ bgt- .Lenc_key_abort ++ andi. 
0,4,0x3f ++ bne- .Lenc_key_abort ++ ++ lis 0,0xfff0 ++ li 12,-1 ++ or 0,0,0 ++ ++ bl .Lconsts ++ mtlr 11 ++ ++ neg 9,3 ++ lvx 1,0,3 ++ addi 3,3,15 ++ lvsr 3,0,9 ++ li 8,0x20 ++ cmpwi 4,192 ++ lvx 2,0,3 ++ vspltisb 5,0x0f ++ lvx 4,0,6 ++ vxor 3,3,5 ++ lvx 5,8,6 ++ addi 6,6,0x10 ++ vperm 1,1,2,3 ++ li 7,8 ++ vxor 0,0,0 ++ mtctr 7 ++ ++ lvsl 8,0,5 ++ vspltisb 9,-1 ++ lvx 10,0,5 ++ vperm 9,9,0,8 ++ ++ blt .Loop128 ++ addi 3,3,8 ++ beq .L192 ++ addi 3,3,8 ++ b .L256 ++ ++.align 4 ++.Loop128: ++ vperm 3,1,1,5 ++ vsldoi 6,0,1,12 ++ vperm 11,1,1,8 ++ vsel 7,10,11,9 ++ vor 10,11,11 ++ .long 0x10632509 ++ stvx 7,0,5 ++ addi 5,5,16 ++ ++ vxor 1,1,6 ++ vsldoi 6,0,6,12 ++ vxor 1,1,6 ++ vsldoi 6,0,6,12 ++ vxor 1,1,6 ++ vadduwm 4,4,4 ++ vxor 1,1,3 ++ bdnz .Loop128 ++ ++ lvx 4,0,6 ++ ++ vperm 3,1,1,5 ++ vsldoi 6,0,1,12 ++ vperm 11,1,1,8 ++ vsel 7,10,11,9 ++ vor 10,11,11 ++ .long 0x10632509 ++ stvx 7,0,5 ++ addi 5,5,16 ++ ++ vxor 1,1,6 ++ vsldoi 6,0,6,12 ++ vxor 1,1,6 ++ vsldoi 6,0,6,12 ++ vxor 1,1,6 ++ vadduwm 4,4,4 ++ vxor 1,1,3 ++ ++ vperm 3,1,1,5 ++ vsldoi 6,0,1,12 ++ vperm 11,1,1,8 ++ vsel 7,10,11,9 ++ vor 10,11,11 ++ .long 0x10632509 ++ stvx 7,0,5 ++ addi 5,5,16 ++ ++ vxor 1,1,6 ++ vsldoi 6,0,6,12 ++ vxor 1,1,6 ++ vsldoi 6,0,6,12 ++ vxor 1,1,6 ++ vxor 1,1,3 ++ vperm 11,1,1,8 ++ vsel 7,10,11,9 ++ vor 10,11,11 ++ stvx 7,0,5 ++ ++ addi 3,5,15 ++ addi 5,5,0x50 ++ ++ li 8,10 ++ b .Ldone ++ ++.align 4 ++.L192: ++ lvx 6,0,3 ++ li 7,4 ++ vperm 11,1,1,8 ++ vsel 7,10,11,9 ++ vor 10,11,11 ++ stvx 7,0,5 ++ addi 5,5,16 ++ vperm 2,2,6,3 ++ vspltisb 3,8 ++ mtctr 7 ++ vsububm 5,5,3 ++ ++.Loop192: ++ vperm 3,2,2,5 ++ vsldoi 6,0,1,12 ++ .long 0x10632509 ++ ++ vxor 1,1,6 ++ vsldoi 6,0,6,12 ++ vxor 1,1,6 ++ vsldoi 6,0,6,12 ++ vxor 1,1,6 ++ ++ vsldoi 7,0,2,8 ++ vspltw 6,1,3 ++ vxor 6,6,2 ++ vsldoi 2,0,2,12 ++ vadduwm 4,4,4 ++ vxor 2,2,6 ++ vxor 1,1,3 ++ vxor 2,2,3 ++ vsldoi 7,7,1,8 ++ ++ vperm 3,2,2,5 ++ vsldoi 6,0,1,12 ++ vperm 11,7,7,8 ++ vsel 7,10,11,9 ++ vor 10,11,11 ++ .long 0x10632509 ++ 
stvx 7,0,5 ++ addi 5,5,16 ++ ++ vsldoi 7,1,2,8 ++ vxor 1,1,6 ++ vsldoi 6,0,6,12 ++ vperm 11,7,7,8 ++ vsel 7,10,11,9 ++ vor 10,11,11 ++ vxor 1,1,6 ++ vsldoi 6,0,6,12 ++ vxor 1,1,6 ++ stvx 7,0,5 ++ addi 5,5,16 ++ ++ vspltw 6,1,3 ++ vxor 6,6,2 ++ vsldoi 2,0,2,12 ++ vadduwm 4,4,4 ++ vxor 2,2,6 ++ vxor 1,1,3 ++ vxor 2,2,3 ++ vperm 11,1,1,8 ++ vsel 7,10,11,9 ++ vor 10,11,11 ++ stvx 7,0,5 ++ addi 3,5,15 ++ addi 5,5,16 ++ bdnz .Loop192 ++ ++ li 8,12 ++ addi 5,5,0x20 ++ b .Ldone ++ ++.align 4 ++.L256: ++ lvx 6,0,3 ++ li 7,7 ++ li 8,14 ++ vperm 11,1,1,8 ++ vsel 7,10,11,9 ++ vor 10,11,11 ++ stvx 7,0,5 ++ addi 5,5,16 ++ vperm 2,2,6,3 ++ mtctr 7 ++ ++.Loop256: ++ vperm 3,2,2,5 ++ vsldoi 6,0,1,12 ++ vperm 11,2,2,8 ++ vsel 7,10,11,9 ++ vor 10,11,11 ++ .long 0x10632509 ++ stvx 7,0,5 ++ addi 5,5,16 ++ ++ vxor 1,1,6 ++ vsldoi 6,0,6,12 ++ vxor 1,1,6 ++ vsldoi 6,0,6,12 ++ vxor 1,1,6 ++ vadduwm 4,4,4 ++ vxor 1,1,3 ++ vperm 11,1,1,8 ++ vsel 7,10,11,9 ++ vor 10,11,11 ++ stvx 7,0,5 ++ addi 3,5,15 ++ addi 5,5,16 ++ bdz .Ldone ++ ++ vspltw 3,1,3 ++ vsldoi 6,0,2,12 ++ .long 0x106305C8 ++ ++ vxor 2,2,6 ++ vsldoi 6,0,6,12 ++ vxor 2,2,6 ++ vsldoi 6,0,6,12 ++ vxor 2,2,6 ++ ++ vxor 2,2,3 ++ b .Loop256 ++ ++.align 4 ++.Ldone: ++ lvx 2,0,3 ++ vsel 2,10,2,9 ++ stvx 2,0,3 ++ li 6,0 ++ or 12,12,12 ++ stw 8,0(5) ++ ++.Lenc_key_abort: ++ mr 3,6 ++ blr ++.long 0 ++.byte 0,12,0x14,1,0,0,3,0 ++.long 0 ++.size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key ++ ++.globl aes_hw_set_decrypt_key ++.type aes_hw_set_decrypt_key,@function ++.align 5 ++aes_hw_set_decrypt_key: ++.localentry aes_hw_set_decrypt_key,0 ++ ++ stdu 1,-64(1) ++ mflr 10 ++ std 10,80(1) ++ bl .Lset_encrypt_key ++ mtlr 10 ++ ++ cmpwi 3,0 ++ bne- .Ldec_key_abort ++ ++ slwi 7,8,4 ++ subi 3,5,240 ++ srwi 8,8,1 ++ add 5,3,7 ++ mtctr 8 ++ ++.Ldeckey: ++ lwz 0, 0(3) ++ lwz 6, 4(3) ++ lwz 7, 8(3) ++ lwz 8, 12(3) ++ addi 3,3,16 ++ lwz 9, 0(5) ++ lwz 10,4(5) ++ lwz 11,8(5) ++ lwz 12,12(5) ++ stw 0, 0(5) ++ stw 6, 4(5) ++ stw 7, 8(5) ++ stw 8, 12(5) ++ 
subi 5,5,16 ++ stw 9, -16(3) ++ stw 10,-12(3) ++ stw 11,-8(3) ++ stw 12,-4(3) ++ bdnz .Ldeckey ++ ++ xor 3,3,3 ++.Ldec_key_abort: ++ addi 1,1,64 ++ blr ++.long 0 ++.byte 0,12,4,1,0x80,0,3,0 ++.long 0 ++.size aes_hw_set_decrypt_key,.-aes_hw_set_decrypt_key ++.globl aes_hw_encrypt ++.type aes_hw_encrypt,@function ++.align 5 ++aes_hw_encrypt: ++.localentry aes_hw_encrypt,0 ++ ++ lwz 6,240(5) ++ lis 0,0xfc00 ++ li 12,-1 ++ li 7,15 ++ or 0,0,0 ++ ++ lvx 0,0,3 ++ neg 11,4 ++ lvx 1,7,3 ++ lvsl 2,0,3 ++ vspltisb 4,0x0f ++ lvsr 3,0,11 ++ vxor 2,2,4 ++ li 7,16 ++ vperm 0,0,1,2 ++ lvx 1,0,5 ++ lvsr 5,0,5 ++ srwi 6,6,1 ++ lvx 2,7,5 ++ addi 7,7,16 ++ subi 6,6,1 ++ vperm 1,2,1,5 ++ ++ vxor 0,0,1 ++ lvx 1,7,5 ++ addi 7,7,16 ++ mtctr 6 ++ ++.Loop_enc: ++ vperm 2,1,2,5 ++ .long 0x10001508 ++ lvx 2,7,5 ++ addi 7,7,16 ++ vperm 1,2,1,5 ++ .long 0x10000D08 ++ lvx 1,7,5 ++ addi 7,7,16 ++ bdnz .Loop_enc ++ ++ vperm 2,1,2,5 ++ .long 0x10001508 ++ lvx 2,7,5 ++ vperm 1,2,1,5 ++ .long 0x10000D09 ++ ++ vspltisb 2,-1 ++ vxor 1,1,1 ++ li 7,15 ++ vperm 2,2,1,3 ++ vxor 3,3,4 ++ lvx 1,0,4 ++ vperm 0,0,0,3 ++ vsel 1,1,0,2 ++ lvx 4,7,4 ++ stvx 1,0,4 ++ vsel 0,0,4,2 ++ stvx 0,7,4 ++ ++ or 12,12,12 ++ blr ++.long 0 ++.byte 0,12,0x14,0,0,0,3,0 ++.long 0 ++.size aes_hw_encrypt,.-aes_hw_encrypt ++.globl aes_hw_decrypt ++.type aes_hw_decrypt,@function ++.align 5 ++aes_hw_decrypt: ++.localentry aes_hw_decrypt,0 ++ ++ lwz 6,240(5) ++ lis 0,0xfc00 ++ li 12,-1 ++ li 7,15 ++ or 0,0,0 ++ ++ lvx 0,0,3 ++ neg 11,4 ++ lvx 1,7,3 ++ lvsl 2,0,3 ++ vspltisb 4,0x0f ++ lvsr 3,0,11 ++ vxor 2,2,4 ++ li 7,16 ++ vperm 0,0,1,2 ++ lvx 1,0,5 ++ lvsr 5,0,5 ++ srwi 6,6,1 ++ lvx 2,7,5 ++ addi 7,7,16 ++ subi 6,6,1 ++ vperm 1,2,1,5 ++ ++ vxor 0,0,1 ++ lvx 1,7,5 ++ addi 7,7,16 ++ mtctr 6 ++ ++.Loop_dec: ++ vperm 2,1,2,5 ++ .long 0x10001548 ++ lvx 2,7,5 ++ addi 7,7,16 ++ vperm 1,2,1,5 ++ .long 0x10000D48 ++ lvx 1,7,5 ++ addi 7,7,16 ++ bdnz .Loop_dec ++ ++ vperm 2,1,2,5 ++ .long 0x10001548 ++ lvx 2,7,5 ++ vperm 1,2,1,5 ++ .long 
0x10000D49 ++ ++ vspltisb 2,-1 ++ vxor 1,1,1 ++ li 7,15 ++ vperm 2,2,1,3 ++ vxor 3,3,4 ++ lvx 1,0,4 ++ vperm 0,0,0,3 ++ vsel 1,1,0,2 ++ lvx 4,7,4 ++ stvx 1,0,4 ++ vsel 0,0,4,2 ++ stvx 0,7,4 ++ ++ or 12,12,12 ++ blr ++.long 0 ++.byte 0,12,0x14,0,0,0,3,0 ++.long 0 ++.size aes_hw_decrypt,.-aes_hw_decrypt ++.globl aes_hw_cbc_encrypt ++.type aes_hw_cbc_encrypt,@function ++.align 5 ++aes_hw_cbc_encrypt: ++.localentry aes_hw_cbc_encrypt,0 ++ ++ cmpldi 5,16 ++ .long 0x4dc00020 ++ ++ cmpwi 8,0 ++ lis 0,0xffe0 ++ li 12,-1 ++ or 0,0,0 ++ ++ li 10,15 ++ vxor 0,0,0 ++ vspltisb 3,0x0f ++ ++ lvx 4,0,7 ++ lvsl 6,0,7 ++ lvx 5,10,7 ++ vxor 6,6,3 ++ vperm 4,4,5,6 ++ ++ neg 11,3 ++ lvsr 10,0,6 ++ lwz 9,240(6) ++ ++ lvsr 6,0,11 ++ lvx 5,0,3 ++ addi 3,3,15 ++ vxor 6,6,3 ++ ++ lvsl 8,0,4 ++ vspltisb 9,-1 ++ lvx 7,0,4 ++ vperm 9,9,0,8 ++ vxor 8,8,3 ++ ++ srwi 9,9,1 ++ li 10,16 ++ subi 9,9,1 ++ beq .Lcbc_dec ++ ++.Lcbc_enc: ++ vor 2,5,5 ++ lvx 5,0,3 ++ addi 3,3,16 ++ mtctr 9 ++ subi 5,5,16 ++ ++ lvx 0,0,6 ++ vperm 2,2,5,6 ++ lvx 1,10,6 ++ addi 10,10,16 ++ vperm 0,1,0,10 ++ vxor 2,2,0 ++ lvx 0,10,6 ++ addi 10,10,16 ++ vxor 2,2,4 ++ ++.Loop_cbc_enc: ++ vperm 1,0,1,10 ++ .long 0x10420D08 ++ lvx 1,10,6 ++ addi 10,10,16 ++ vperm 0,1,0,10 ++ .long 0x10420508 ++ lvx 0,10,6 ++ addi 10,10,16 ++ bdnz .Loop_cbc_enc ++ ++ vperm 1,0,1,10 ++ .long 0x10420D08 ++ lvx 1,10,6 ++ li 10,16 ++ vperm 0,1,0,10 ++ .long 0x10820509 ++ cmpldi 5,16 ++ ++ vperm 3,4,4,8 ++ vsel 2,7,3,9 ++ vor 7,3,3 ++ stvx 2,0,4 ++ addi 4,4,16 ++ bge .Lcbc_enc ++ ++ b .Lcbc_done ++ ++.align 4 ++.Lcbc_dec: ++ cmpldi 5,128 ++ bge _aesp8_cbc_decrypt8x ++ vor 3,5,5 ++ lvx 5,0,3 ++ addi 3,3,16 ++ mtctr 9 ++ subi 5,5,16 ++ ++ lvx 0,0,6 ++ vperm 3,3,5,6 ++ lvx 1,10,6 ++ addi 10,10,16 ++ vperm 0,1,0,10 ++ vxor 2,3,0 ++ lvx 0,10,6 ++ addi 10,10,16 ++ ++.Loop_cbc_dec: ++ vperm 1,0,1,10 ++ .long 0x10420D48 ++ lvx 1,10,6 ++ addi 10,10,16 ++ vperm 0,1,0,10 ++ .long 0x10420548 ++ lvx 0,10,6 ++ addi 10,10,16 ++ bdnz .Loop_cbc_dec ++ ++ vperm 
1,0,1,10 ++ .long 0x10420D48 ++ lvx 1,10,6 ++ li 10,16 ++ vperm 0,1,0,10 ++ .long 0x10420549 ++ cmpldi 5,16 ++ ++ vxor 2,2,4 ++ vor 4,3,3 ++ vperm 3,2,2,8 ++ vsel 2,7,3,9 ++ vor 7,3,3 ++ stvx 2,0,4 ++ addi 4,4,16 ++ bge .Lcbc_dec ++ ++.Lcbc_done: ++ addi 4,4,-1 ++ lvx 2,0,4 ++ vsel 2,7,2,9 ++ stvx 2,0,4 ++ ++ neg 8,7 ++ li 10,15 ++ vxor 0,0,0 ++ vspltisb 9,-1 ++ vspltisb 3,0x0f ++ lvsr 8,0,8 ++ vperm 9,9,0,8 ++ vxor 8,8,3 ++ lvx 7,0,7 ++ vperm 4,4,4,8 ++ vsel 2,7,4,9 ++ lvx 5,10,7 ++ stvx 2,0,7 ++ vsel 2,4,5,9 ++ stvx 2,10,7 ++ ++ or 12,12,12 ++ blr ++.long 0 ++.byte 0,12,0x14,0,0,0,6,0 ++.long 0 ++.align 5 ++_aesp8_cbc_decrypt8x: ++ stdu 1,-448(1) ++ li 10,207 ++ li 11,223 ++ stvx 20,10,1 ++ addi 10,10,32 ++ stvx 21,11,1 ++ addi 11,11,32 ++ stvx 22,10,1 ++ addi 10,10,32 ++ stvx 23,11,1 ++ addi 11,11,32 ++ stvx 24,10,1 ++ addi 10,10,32 ++ stvx 25,11,1 ++ addi 11,11,32 ++ stvx 26,10,1 ++ addi 10,10,32 ++ stvx 27,11,1 ++ addi 11,11,32 ++ stvx 28,10,1 ++ addi 10,10,32 ++ stvx 29,11,1 ++ addi 11,11,32 ++ stvx 30,10,1 ++ stvx 31,11,1 ++ li 0,-1 ++ stw 12,396(1) ++ li 8,0x10 ++ std 26,400(1) ++ li 26,0x20 ++ std 27,408(1) ++ li 27,0x30 ++ std 28,416(1) ++ li 28,0x40 ++ std 29,424(1) ++ li 29,0x50 ++ std 30,432(1) ++ li 30,0x60 ++ std 31,440(1) ++ li 31,0x70 ++ or 0,0,0 ++ ++ subi 9,9,3 ++ subi 5,5,128 ++ ++ lvx 23,0,6 ++ lvx 30,8,6 ++ addi 6,6,0x20 ++ lvx 31,0,6 ++ vperm 23,30,23,10 ++ addi 11,1,79 ++ mtctr 9 ++ ++.Load_cbc_dec_key: ++ vperm 24,31,30,10 ++ lvx 30,8,6 ++ addi 6,6,0x20 ++ stvx 24,0,11 ++ vperm 25,30,31,10 ++ lvx 31,0,6 ++ stvx 25,8,11 ++ addi 11,11,0x20 ++ bdnz .Load_cbc_dec_key ++ ++ lvx 26,8,6 ++ vperm 24,31,30,10 ++ lvx 27,26,6 ++ stvx 24,0,11 ++ vperm 25,26,31,10 ++ lvx 28,27,6 ++ stvx 25,8,11 ++ addi 11,1,79 ++ vperm 26,27,26,10 ++ lvx 29,28,6 ++ vperm 27,28,27,10 ++ lvx 30,29,6 ++ vperm 28,29,28,10 ++ lvx 31,30,6 ++ vperm 29,30,29,10 ++ lvx 14,31,6 ++ vperm 30,31,30,10 ++ lvx 24,0,11 ++ vperm 31,14,31,10 ++ lvx 25,8,11 ++ ++ ++ ++ subi 3,3,15 ++ ++ 
li 10,8 ++ .long 0x7C001E99 ++ lvsl 6,0,10 ++ vspltisb 3,0x0f ++ .long 0x7C281E99 ++ vxor 6,6,3 ++ .long 0x7C5A1E99 ++ vperm 0,0,0,6 ++ .long 0x7C7B1E99 ++ vperm 1,1,1,6 ++ .long 0x7D5C1E99 ++ vperm 2,2,2,6 ++ vxor 14,0,23 ++ .long 0x7D7D1E99 ++ vperm 3,3,3,6 ++ vxor 15,1,23 ++ .long 0x7D9E1E99 ++ vperm 10,10,10,6 ++ vxor 16,2,23 ++ .long 0x7DBF1E99 ++ addi 3,3,0x80 ++ vperm 11,11,11,6 ++ vxor 17,3,23 ++ vperm 12,12,12,6 ++ vxor 18,10,23 ++ vperm 13,13,13,6 ++ vxor 19,11,23 ++ vxor 20,12,23 ++ vxor 21,13,23 ++ ++ mtctr 9 ++ b .Loop_cbc_dec8x ++.align 5 ++.Loop_cbc_dec8x: ++ .long 0x11CEC548 ++ .long 0x11EFC548 ++ .long 0x1210C548 ++ .long 0x1231C548 ++ .long 0x1252C548 ++ .long 0x1273C548 ++ .long 0x1294C548 ++ .long 0x12B5C548 ++ lvx 24,26,11 ++ addi 11,11,0x20 ++ ++ .long 0x11CECD48 ++ .long 0x11EFCD48 ++ .long 0x1210CD48 ++ .long 0x1231CD48 ++ .long 0x1252CD48 ++ .long 0x1273CD48 ++ .long 0x1294CD48 ++ .long 0x12B5CD48 ++ lvx 25,8,11 ++ bdnz .Loop_cbc_dec8x ++ ++ subic 5,5,128 ++ .long 0x11CEC548 ++ .long 0x11EFC548 ++ .long 0x1210C548 ++ .long 0x1231C548 ++ .long 0x1252C548 ++ .long 0x1273C548 ++ .long 0x1294C548 ++ .long 0x12B5C548 ++ ++ subfe. 
0,0,0 ++ .long 0x11CECD48 ++ .long 0x11EFCD48 ++ .long 0x1210CD48 ++ .long 0x1231CD48 ++ .long 0x1252CD48 ++ .long 0x1273CD48 ++ .long 0x1294CD48 ++ .long 0x12B5CD48 ++ ++ and 0,0,5 ++ .long 0x11CED548 ++ .long 0x11EFD548 ++ .long 0x1210D548 ++ .long 0x1231D548 ++ .long 0x1252D548 ++ .long 0x1273D548 ++ .long 0x1294D548 ++ .long 0x12B5D548 ++ ++ add 3,3,0 ++ ++ ++ ++ .long 0x11CEDD48 ++ .long 0x11EFDD48 ++ .long 0x1210DD48 ++ .long 0x1231DD48 ++ .long 0x1252DD48 ++ .long 0x1273DD48 ++ .long 0x1294DD48 ++ .long 0x12B5DD48 ++ ++ addi 11,1,79 ++ .long 0x11CEE548 ++ .long 0x11EFE548 ++ .long 0x1210E548 ++ .long 0x1231E548 ++ .long 0x1252E548 ++ .long 0x1273E548 ++ .long 0x1294E548 ++ .long 0x12B5E548 ++ lvx 24,0,11 ++ ++ .long 0x11CEED48 ++ .long 0x11EFED48 ++ .long 0x1210ED48 ++ .long 0x1231ED48 ++ .long 0x1252ED48 ++ .long 0x1273ED48 ++ .long 0x1294ED48 ++ .long 0x12B5ED48 ++ lvx 25,8,11 ++ ++ .long 0x11CEF548 ++ vxor 4,4,31 ++ .long 0x11EFF548 ++ vxor 0,0,31 ++ .long 0x1210F548 ++ vxor 1,1,31 ++ .long 0x1231F548 ++ vxor 2,2,31 ++ .long 0x1252F548 ++ vxor 3,3,31 ++ .long 0x1273F548 ++ vxor 10,10,31 ++ .long 0x1294F548 ++ vxor 11,11,31 ++ .long 0x12B5F548 ++ vxor 12,12,31 ++ ++ .long 0x11CE2549 ++ .long 0x11EF0549 ++ .long 0x7C001E99 ++ .long 0x12100D49 ++ .long 0x7C281E99 ++ .long 0x12311549 ++ vperm 0,0,0,6 ++ .long 0x7C5A1E99 ++ .long 0x12521D49 ++ vperm 1,1,1,6 ++ .long 0x7C7B1E99 ++ .long 0x12735549 ++ vperm 2,2,2,6 ++ .long 0x7D5C1E99 ++ .long 0x12945D49 ++ vperm 3,3,3,6 ++ .long 0x7D7D1E99 ++ .long 0x12B56549 ++ vperm 10,10,10,6 ++ .long 0x7D9E1E99 ++ vor 4,13,13 ++ vperm 11,11,11,6 ++ .long 0x7DBF1E99 ++ addi 3,3,0x80 ++ ++ vperm 14,14,14,6 ++ vperm 15,15,15,6 ++ .long 0x7DC02799 ++ vperm 12,12,12,6 ++ vxor 14,0,23 ++ vperm 16,16,16,6 ++ .long 0x7DE82799 ++ vperm 13,13,13,6 ++ vxor 15,1,23 ++ vperm 17,17,17,6 ++ .long 0x7E1A2799 ++ vxor 16,2,23 ++ vperm 18,18,18,6 ++ .long 0x7E3B2799 ++ vxor 17,3,23 ++ vperm 19,19,19,6 ++ .long 0x7E5C2799 ++ vxor 18,10,23 ++ 
vperm 20,20,20,6 ++ .long 0x7E7D2799 ++ vxor 19,11,23 ++ vperm 21,21,21,6 ++ .long 0x7E9E2799 ++ vxor 20,12,23 ++ .long 0x7EBF2799 ++ addi 4,4,0x80 ++ vxor 21,13,23 ++ ++ mtctr 9 ++ beq .Loop_cbc_dec8x ++ ++ addic. 5,5,128 ++ beq .Lcbc_dec8x_done ++ nop ++ nop ++ ++.Loop_cbc_dec8x_tail: ++ .long 0x11EFC548 ++ .long 0x1210C548 ++ .long 0x1231C548 ++ .long 0x1252C548 ++ .long 0x1273C548 ++ .long 0x1294C548 ++ .long 0x12B5C548 ++ lvx 24,26,11 ++ addi 11,11,0x20 ++ ++ .long 0x11EFCD48 ++ .long 0x1210CD48 ++ .long 0x1231CD48 ++ .long 0x1252CD48 ++ .long 0x1273CD48 ++ .long 0x1294CD48 ++ .long 0x12B5CD48 ++ lvx 25,8,11 ++ bdnz .Loop_cbc_dec8x_tail ++ ++ .long 0x11EFC548 ++ .long 0x1210C548 ++ .long 0x1231C548 ++ .long 0x1252C548 ++ .long 0x1273C548 ++ .long 0x1294C548 ++ .long 0x12B5C548 ++ ++ .long 0x11EFCD48 ++ .long 0x1210CD48 ++ .long 0x1231CD48 ++ .long 0x1252CD48 ++ .long 0x1273CD48 ++ .long 0x1294CD48 ++ .long 0x12B5CD48 ++ ++ .long 0x11EFD548 ++ .long 0x1210D548 ++ .long 0x1231D548 ++ .long 0x1252D548 ++ .long 0x1273D548 ++ .long 0x1294D548 ++ .long 0x12B5D548 ++ ++ .long 0x11EFDD48 ++ .long 0x1210DD48 ++ .long 0x1231DD48 ++ .long 0x1252DD48 ++ .long 0x1273DD48 ++ .long 0x1294DD48 ++ .long 0x12B5DD48 ++ ++ .long 0x11EFE548 ++ .long 0x1210E548 ++ .long 0x1231E548 ++ .long 0x1252E548 ++ .long 0x1273E548 ++ .long 0x1294E548 ++ .long 0x12B5E548 ++ ++ .long 0x11EFED48 ++ .long 0x1210ED48 ++ .long 0x1231ED48 ++ .long 0x1252ED48 ++ .long 0x1273ED48 ++ .long 0x1294ED48 ++ .long 0x12B5ED48 ++ ++ .long 0x11EFF548 ++ vxor 4,4,31 ++ .long 0x1210F548 ++ vxor 1,1,31 ++ .long 0x1231F548 ++ vxor 2,2,31 ++ .long 0x1252F548 ++ vxor 3,3,31 ++ .long 0x1273F548 ++ vxor 10,10,31 ++ .long 0x1294F548 ++ vxor 11,11,31 ++ .long 0x12B5F548 ++ vxor 12,12,31 ++ ++ cmplwi 5,32 ++ blt .Lcbc_dec8x_one ++ nop ++ beq .Lcbc_dec8x_two ++ cmplwi 5,64 ++ blt .Lcbc_dec8x_three ++ nop ++ beq .Lcbc_dec8x_four ++ cmplwi 5,96 ++ blt .Lcbc_dec8x_five ++ nop ++ beq .Lcbc_dec8x_six ++ ++.Lcbc_dec8x_seven: ++ 
.long 0x11EF2549 ++ .long 0x12100D49 ++ .long 0x12311549 ++ .long 0x12521D49 ++ .long 0x12735549 ++ .long 0x12945D49 ++ .long 0x12B56549 ++ vor 4,13,13 ++ ++ vperm 15,15,15,6 ++ vperm 16,16,16,6 ++ .long 0x7DE02799 ++ vperm 17,17,17,6 ++ .long 0x7E082799 ++ vperm 18,18,18,6 ++ .long 0x7E3A2799 ++ vperm 19,19,19,6 ++ .long 0x7E5B2799 ++ vperm 20,20,20,6 ++ .long 0x7E7C2799 ++ vperm 21,21,21,6 ++ .long 0x7E9D2799 ++ .long 0x7EBE2799 ++ addi 4,4,0x70 ++ b .Lcbc_dec8x_done ++ ++.align 5 ++.Lcbc_dec8x_six: ++ .long 0x12102549 ++ .long 0x12311549 ++ .long 0x12521D49 ++ .long 0x12735549 ++ .long 0x12945D49 ++ .long 0x12B56549 ++ vor 4,13,13 ++ ++ vperm 16,16,16,6 ++ vperm 17,17,17,6 ++ .long 0x7E002799 ++ vperm 18,18,18,6 ++ .long 0x7E282799 ++ vperm 19,19,19,6 ++ .long 0x7E5A2799 ++ vperm 20,20,20,6 ++ .long 0x7E7B2799 ++ vperm 21,21,21,6 ++ .long 0x7E9C2799 ++ .long 0x7EBD2799 ++ addi 4,4,0x60 ++ b .Lcbc_dec8x_done ++ ++.align 5 ++.Lcbc_dec8x_five: ++ .long 0x12312549 ++ .long 0x12521D49 ++ .long 0x12735549 ++ .long 0x12945D49 ++ .long 0x12B56549 ++ vor 4,13,13 ++ ++ vperm 17,17,17,6 ++ vperm 18,18,18,6 ++ .long 0x7E202799 ++ vperm 19,19,19,6 ++ .long 0x7E482799 ++ vperm 20,20,20,6 ++ .long 0x7E7A2799 ++ vperm 21,21,21,6 ++ .long 0x7E9B2799 ++ .long 0x7EBC2799 ++ addi 4,4,0x50 ++ b .Lcbc_dec8x_done ++ ++.align 5 ++.Lcbc_dec8x_four: ++ .long 0x12522549 ++ .long 0x12735549 ++ .long 0x12945D49 ++ .long 0x12B56549 ++ vor 4,13,13 ++ ++ vperm 18,18,18,6 ++ vperm 19,19,19,6 ++ .long 0x7E402799 ++ vperm 20,20,20,6 ++ .long 0x7E682799 ++ vperm 21,21,21,6 ++ .long 0x7E9A2799 ++ .long 0x7EBB2799 ++ addi 4,4,0x40 ++ b .Lcbc_dec8x_done ++ ++.align 5 ++.Lcbc_dec8x_three: ++ .long 0x12732549 ++ .long 0x12945D49 ++ .long 0x12B56549 ++ vor 4,13,13 ++ ++ vperm 19,19,19,6 ++ vperm 20,20,20,6 ++ .long 0x7E602799 ++ vperm 21,21,21,6 ++ .long 0x7E882799 ++ .long 0x7EBA2799 ++ addi 4,4,0x30 ++ b .Lcbc_dec8x_done ++ ++.align 5 ++.Lcbc_dec8x_two: ++ .long 0x12942549 ++ .long 0x12B56549 ++ vor 
4,13,13 ++ ++ vperm 20,20,20,6 ++ vperm 21,21,21,6 ++ .long 0x7E802799 ++ .long 0x7EA82799 ++ addi 4,4,0x20 ++ b .Lcbc_dec8x_done ++ ++.align 5 ++.Lcbc_dec8x_one: ++ .long 0x12B52549 ++ vor 4,13,13 ++ ++ vperm 21,21,21,6 ++ .long 0x7EA02799 ++ addi 4,4,0x10 ++ ++.Lcbc_dec8x_done: ++ vperm 4,4,4,6 ++ .long 0x7C803F99 ++ ++ li 10,79 ++ li 11,95 ++ stvx 6,10,1 ++ addi 10,10,32 ++ stvx 6,11,1 ++ addi 11,11,32 ++ stvx 6,10,1 ++ addi 10,10,32 ++ stvx 6,11,1 ++ addi 11,11,32 ++ stvx 6,10,1 ++ addi 10,10,32 ++ stvx 6,11,1 ++ addi 11,11,32 ++ stvx 6,10,1 ++ addi 10,10,32 ++ stvx 6,11,1 ++ addi 11,11,32 ++ ++ or 12,12,12 ++ lvx 20,10,1 ++ addi 10,10,32 ++ lvx 21,11,1 ++ addi 11,11,32 ++ lvx 22,10,1 ++ addi 10,10,32 ++ lvx 23,11,1 ++ addi 11,11,32 ++ lvx 24,10,1 ++ addi 10,10,32 ++ lvx 25,11,1 ++ addi 11,11,32 ++ lvx 26,10,1 ++ addi 10,10,32 ++ lvx 27,11,1 ++ addi 11,11,32 ++ lvx 28,10,1 ++ addi 10,10,32 ++ lvx 29,11,1 ++ addi 11,11,32 ++ lvx 30,10,1 ++ lvx 31,11,1 ++ ld 26,400(1) ++ ld 27,408(1) ++ ld 28,416(1) ++ ld 29,424(1) ++ ld 30,432(1) ++ ld 31,440(1) ++ addi 1,1,448 ++ blr ++.long 0 ++.byte 0,12,0x04,0,0x80,6,6,0 ++.long 0 ++.size aes_hw_cbc_encrypt,.-aes_hw_cbc_encrypt ++.globl aes_hw_ctr32_encrypt_blocks ++.type aes_hw_ctr32_encrypt_blocks,@function ++.align 5 ++aes_hw_ctr32_encrypt_blocks: ++.localentry aes_hw_ctr32_encrypt_blocks,0 ++ ++ cmpldi 5,1 ++ .long 0x4dc00020 ++ ++ lis 0,0xfff0 ++ li 12,-1 ++ or 0,0,0 ++ ++ li 10,15 ++ vxor 0,0,0 ++ vspltisb 3,0x0f ++ ++ lvx 4,0,7 ++ lvsl 6,0,7 ++ lvx 5,10,7 ++ vspltisb 11,1 ++ vxor 6,6,3 ++ vperm 4,4,5,6 ++ vsldoi 11,0,11,1 ++ ++ neg 11,3 ++ lvsr 10,0,6 ++ lwz 9,240(6) ++ ++ lvsr 6,0,11 ++ lvx 5,0,3 ++ addi 3,3,15 ++ vxor 6,6,3 ++ ++ srwi 9,9,1 ++ li 10,16 ++ subi 9,9,1 ++ ++ cmpldi 5,8 ++ bge _aesp8_ctr32_encrypt8x ++ ++ lvsl 8,0,4 ++ vspltisb 9,-1 ++ lvx 7,0,4 ++ vperm 9,9,0,8 ++ vxor 8,8,3 ++ ++ lvx 0,0,6 ++ mtctr 9 ++ lvx 1,10,6 ++ addi 10,10,16 ++ vperm 0,1,0,10 ++ vxor 2,4,0 ++ lvx 0,10,6 ++ addi 10,10,16 ++ b 
.Loop_ctr32_enc ++ ++.align 5 ++.Loop_ctr32_enc: ++ vperm 1,0,1,10 ++ .long 0x10420D08 ++ lvx 1,10,6 ++ addi 10,10,16 ++ vperm 0,1,0,10 ++ .long 0x10420508 ++ lvx 0,10,6 ++ addi 10,10,16 ++ bdnz .Loop_ctr32_enc ++ ++ vadduwm 4,4,11 ++ vor 3,5,5 ++ lvx 5,0,3 ++ addi 3,3,16 ++ subic. 5,5,1 ++ ++ vperm 1,0,1,10 ++ .long 0x10420D08 ++ lvx 1,10,6 ++ vperm 3,3,5,6 ++ li 10,16 ++ vperm 1,1,0,10 ++ lvx 0,0,6 ++ vxor 3,3,1 ++ .long 0x10421D09 ++ ++ lvx 1,10,6 ++ addi 10,10,16 ++ vperm 2,2,2,8 ++ vsel 3,7,2,9 ++ mtctr 9 ++ vperm 0,1,0,10 ++ vor 7,2,2 ++ vxor 2,4,0 ++ lvx 0,10,6 ++ addi 10,10,16 ++ stvx 3,0,4 ++ addi 4,4,16 ++ bne .Loop_ctr32_enc ++ ++ addi 4,4,-1 ++ lvx 2,0,4 ++ vsel 2,7,2,9 ++ stvx 2,0,4 ++ ++ or 12,12,12 ++ blr ++.long 0 ++.byte 0,12,0x14,0,0,0,6,0 ++.long 0 ++.align 5 ++_aesp8_ctr32_encrypt8x: ++ stdu 1,-448(1) ++ li 10,207 ++ li 11,223 ++ stvx 20,10,1 ++ addi 10,10,32 ++ stvx 21,11,1 ++ addi 11,11,32 ++ stvx 22,10,1 ++ addi 10,10,32 ++ stvx 23,11,1 ++ addi 11,11,32 ++ stvx 24,10,1 ++ addi 10,10,32 ++ stvx 25,11,1 ++ addi 11,11,32 ++ stvx 26,10,1 ++ addi 10,10,32 ++ stvx 27,11,1 ++ addi 11,11,32 ++ stvx 28,10,1 ++ addi 10,10,32 ++ stvx 29,11,1 ++ addi 11,11,32 ++ stvx 30,10,1 ++ stvx 31,11,1 ++ li 0,-1 ++ stw 12,396(1) ++ li 8,0x10 ++ std 26,400(1) ++ li 26,0x20 ++ std 27,408(1) ++ li 27,0x30 ++ std 28,416(1) ++ li 28,0x40 ++ std 29,424(1) ++ li 29,0x50 ++ std 30,432(1) ++ li 30,0x60 ++ std 31,440(1) ++ li 31,0x70 ++ or 0,0,0 ++ ++ subi 9,9,3 ++ ++ lvx 23,0,6 ++ lvx 30,8,6 ++ addi 6,6,0x20 ++ lvx 31,0,6 ++ vperm 23,30,23,10 ++ addi 11,1,79 ++ mtctr 9 ++ ++.Load_ctr32_enc_key: ++ vperm 24,31,30,10 ++ lvx 30,8,6 ++ addi 6,6,0x20 ++ stvx 24,0,11 ++ vperm 25,30,31,10 ++ lvx 31,0,6 ++ stvx 25,8,11 ++ addi 11,11,0x20 ++ bdnz .Load_ctr32_enc_key ++ ++ lvx 26,8,6 ++ vperm 24,31,30,10 ++ lvx 27,26,6 ++ stvx 24,0,11 ++ vperm 25,26,31,10 ++ lvx 28,27,6 ++ stvx 25,8,11 ++ addi 11,1,79 ++ vperm 26,27,26,10 ++ lvx 29,28,6 ++ vperm 27,28,27,10 ++ lvx 30,29,6 ++ vperm 
28,29,28,10 ++ lvx 31,30,6 ++ vperm 29,30,29,10 ++ lvx 15,31,6 ++ vperm 30,31,30,10 ++ lvx 24,0,11 ++ vperm 31,15,31,10 ++ lvx 25,8,11 ++ ++ vadduwm 7,11,11 ++ subi 3,3,15 ++ sldi 5,5,4 ++ ++ vadduwm 16,4,11 ++ vadduwm 17,4,7 ++ vxor 15,4,23 ++ li 10,8 ++ vadduwm 18,16,7 ++ vxor 16,16,23 ++ lvsl 6,0,10 ++ vadduwm 19,17,7 ++ vxor 17,17,23 ++ vspltisb 3,0x0f ++ vadduwm 20,18,7 ++ vxor 18,18,23 ++ vxor 6,6,3 ++ vadduwm 21,19,7 ++ vxor 19,19,23 ++ vadduwm 22,20,7 ++ vxor 20,20,23 ++ vadduwm 4,21,7 ++ vxor 21,21,23 ++ vxor 22,22,23 ++ ++ mtctr 9 ++ b .Loop_ctr32_enc8x ++.align 5 ++.Loop_ctr32_enc8x: ++ .long 0x11EFC508 ++ .long 0x1210C508 ++ .long 0x1231C508 ++ .long 0x1252C508 ++ .long 0x1273C508 ++ .long 0x1294C508 ++ .long 0x12B5C508 ++ .long 0x12D6C508 ++.Loop_ctr32_enc8x_middle: ++ lvx 24,26,11 ++ addi 11,11,0x20 ++ ++ .long 0x11EFCD08 ++ .long 0x1210CD08 ++ .long 0x1231CD08 ++ .long 0x1252CD08 ++ .long 0x1273CD08 ++ .long 0x1294CD08 ++ .long 0x12B5CD08 ++ .long 0x12D6CD08 ++ lvx 25,8,11 ++ bdnz .Loop_ctr32_enc8x ++ ++ subic 11,5,256 ++ .long 0x11EFC508 ++ .long 0x1210C508 ++ .long 0x1231C508 ++ .long 0x1252C508 ++ .long 0x1273C508 ++ .long 0x1294C508 ++ .long 0x12B5C508 ++ .long 0x12D6C508 ++ ++ subfe 0,0,0 ++ .long 0x11EFCD08 ++ .long 0x1210CD08 ++ .long 0x1231CD08 ++ .long 0x1252CD08 ++ .long 0x1273CD08 ++ .long 0x1294CD08 ++ .long 0x12B5CD08 ++ .long 0x12D6CD08 ++ ++ and 0,0,11 ++ addi 11,1,79 ++ .long 0x11EFD508 ++ .long 0x1210D508 ++ .long 0x1231D508 ++ .long 0x1252D508 ++ .long 0x1273D508 ++ .long 0x1294D508 ++ .long 0x12B5D508 ++ .long 0x12D6D508 ++ lvx 24,0,11 ++ ++ subic 5,5,129 ++ .long 0x11EFDD08 ++ addi 5,5,1 ++ .long 0x1210DD08 ++ .long 0x1231DD08 ++ .long 0x1252DD08 ++ .long 0x1273DD08 ++ .long 0x1294DD08 ++ .long 0x12B5DD08 ++ .long 0x12D6DD08 ++ lvx 25,8,11 ++ ++ .long 0x11EFE508 ++ .long 0x7C001E99 ++ .long 0x1210E508 ++ .long 0x7C281E99 ++ .long 0x1231E508 ++ .long 0x7C5A1E99 ++ .long 0x1252E508 ++ .long 0x7C7B1E99 ++ .long 0x1273E508 ++ .long 
0x7D5C1E99 ++ .long 0x1294E508 ++ .long 0x7D9D1E99 ++ .long 0x12B5E508 ++ .long 0x7DBE1E99 ++ .long 0x12D6E508 ++ .long 0x7DDF1E99 ++ addi 3,3,0x80 ++ ++ .long 0x11EFED08 ++ vperm 0,0,0,6 ++ .long 0x1210ED08 ++ vperm 1,1,1,6 ++ .long 0x1231ED08 ++ vperm 2,2,2,6 ++ .long 0x1252ED08 ++ vperm 3,3,3,6 ++ .long 0x1273ED08 ++ vperm 10,10,10,6 ++ .long 0x1294ED08 ++ vperm 12,12,12,6 ++ .long 0x12B5ED08 ++ vperm 13,13,13,6 ++ .long 0x12D6ED08 ++ vperm 14,14,14,6 ++ ++ add 3,3,0 ++ ++ ++ ++ subfe. 0,0,0 ++ .long 0x11EFF508 ++ vxor 0,0,31 ++ .long 0x1210F508 ++ vxor 1,1,31 ++ .long 0x1231F508 ++ vxor 2,2,31 ++ .long 0x1252F508 ++ vxor 3,3,31 ++ .long 0x1273F508 ++ vxor 10,10,31 ++ .long 0x1294F508 ++ vxor 12,12,31 ++ .long 0x12B5F508 ++ vxor 13,13,31 ++ .long 0x12D6F508 ++ vxor 14,14,31 ++ ++ bne .Lctr32_enc8x_break ++ ++ .long 0x100F0509 ++ .long 0x10300D09 ++ vadduwm 16,4,11 ++ .long 0x10511509 ++ vadduwm 17,4,7 ++ vxor 15,4,23 ++ .long 0x10721D09 ++ vadduwm 18,16,7 ++ vxor 16,16,23 ++ .long 0x11535509 ++ vadduwm 19,17,7 ++ vxor 17,17,23 ++ .long 0x11946509 ++ vadduwm 20,18,7 ++ vxor 18,18,23 ++ .long 0x11B56D09 ++ vadduwm 21,19,7 ++ vxor 19,19,23 ++ .long 0x11D67509 ++ vadduwm 22,20,7 ++ vxor 20,20,23 ++ vperm 0,0,0,6 ++ vadduwm 4,21,7 ++ vxor 21,21,23 ++ vperm 1,1,1,6 ++ vxor 22,22,23 ++ mtctr 9 ++ ++ .long 0x11EFC508 ++ .long 0x7C002799 ++ vperm 2,2,2,6 ++ .long 0x1210C508 ++ .long 0x7C282799 ++ vperm 3,3,3,6 ++ .long 0x1231C508 ++ .long 0x7C5A2799 ++ vperm 10,10,10,6 ++ .long 0x1252C508 ++ .long 0x7C7B2799 ++ vperm 12,12,12,6 ++ .long 0x1273C508 ++ .long 0x7D5C2799 ++ vperm 13,13,13,6 ++ .long 0x1294C508 ++ .long 0x7D9D2799 ++ vperm 14,14,14,6 ++ .long 0x12B5C508 ++ .long 0x7DBE2799 ++ .long 0x12D6C508 ++ .long 0x7DDF2799 ++ addi 4,4,0x80 ++ ++ b .Loop_ctr32_enc8x_middle ++ ++.align 5 ++.Lctr32_enc8x_break: ++ cmpwi 5,-0x60 ++ blt .Lctr32_enc8x_one ++ nop ++ beq .Lctr32_enc8x_two ++ cmpwi 5,-0x40 ++ blt .Lctr32_enc8x_three ++ nop ++ beq .Lctr32_enc8x_four ++ cmpwi 
5,-0x20 ++ blt .Lctr32_enc8x_five ++ nop ++ beq .Lctr32_enc8x_six ++ cmpwi 5,0x00 ++ blt .Lctr32_enc8x_seven ++ ++.Lctr32_enc8x_eight: ++ .long 0x11EF0509 ++ .long 0x12100D09 ++ .long 0x12311509 ++ .long 0x12521D09 ++ .long 0x12735509 ++ .long 0x12946509 ++ .long 0x12B56D09 ++ .long 0x12D67509 ++ ++ vperm 15,15,15,6 ++ vperm 16,16,16,6 ++ .long 0x7DE02799 ++ vperm 17,17,17,6 ++ .long 0x7E082799 ++ vperm 18,18,18,6 ++ .long 0x7E3A2799 ++ vperm 19,19,19,6 ++ .long 0x7E5B2799 ++ vperm 20,20,20,6 ++ .long 0x7E7C2799 ++ vperm 21,21,21,6 ++ .long 0x7E9D2799 ++ vperm 22,22,22,6 ++ .long 0x7EBE2799 ++ .long 0x7EDF2799 ++ addi 4,4,0x80 ++ b .Lctr32_enc8x_done ++ ++.align 5 ++.Lctr32_enc8x_seven: ++ .long 0x11EF0D09 ++ .long 0x12101509 ++ .long 0x12311D09 ++ .long 0x12525509 ++ .long 0x12736509 ++ .long 0x12946D09 ++ .long 0x12B57509 ++ ++ vperm 15,15,15,6 ++ vperm 16,16,16,6 ++ .long 0x7DE02799 ++ vperm 17,17,17,6 ++ .long 0x7E082799 ++ vperm 18,18,18,6 ++ .long 0x7E3A2799 ++ vperm 19,19,19,6 ++ .long 0x7E5B2799 ++ vperm 20,20,20,6 ++ .long 0x7E7C2799 ++ vperm 21,21,21,6 ++ .long 0x7E9D2799 ++ .long 0x7EBE2799 ++ addi 4,4,0x70 ++ b .Lctr32_enc8x_done ++ ++.align 5 ++.Lctr32_enc8x_six: ++ .long 0x11EF1509 ++ .long 0x12101D09 ++ .long 0x12315509 ++ .long 0x12526509 ++ .long 0x12736D09 ++ .long 0x12947509 ++ ++ vperm 15,15,15,6 ++ vperm 16,16,16,6 ++ .long 0x7DE02799 ++ vperm 17,17,17,6 ++ .long 0x7E082799 ++ vperm 18,18,18,6 ++ .long 0x7E3A2799 ++ vperm 19,19,19,6 ++ .long 0x7E5B2799 ++ vperm 20,20,20,6 ++ .long 0x7E7C2799 ++ .long 0x7E9D2799 ++ addi 4,4,0x60 ++ b .Lctr32_enc8x_done ++ ++.align 5 ++.Lctr32_enc8x_five: ++ .long 0x11EF1D09 ++ .long 0x12105509 ++ .long 0x12316509 ++ .long 0x12526D09 ++ .long 0x12737509 ++ ++ vperm 15,15,15,6 ++ vperm 16,16,16,6 ++ .long 0x7DE02799 ++ vperm 17,17,17,6 ++ .long 0x7E082799 ++ vperm 18,18,18,6 ++ .long 0x7E3A2799 ++ vperm 19,19,19,6 ++ .long 0x7E5B2799 ++ .long 0x7E7C2799 ++ addi 4,4,0x50 ++ b .Lctr32_enc8x_done ++ ++.align 5 
++.Lctr32_enc8x_four: ++ .long 0x11EF5509 ++ .long 0x12106509 ++ .long 0x12316D09 ++ .long 0x12527509 ++ ++ vperm 15,15,15,6 ++ vperm 16,16,16,6 ++ .long 0x7DE02799 ++ vperm 17,17,17,6 ++ .long 0x7E082799 ++ vperm 18,18,18,6 ++ .long 0x7E3A2799 ++ .long 0x7E5B2799 ++ addi 4,4,0x40 ++ b .Lctr32_enc8x_done ++ ++.align 5 ++.Lctr32_enc8x_three: ++ .long 0x11EF6509 ++ .long 0x12106D09 ++ .long 0x12317509 ++ ++ vperm 15,15,15,6 ++ vperm 16,16,16,6 ++ .long 0x7DE02799 ++ vperm 17,17,17,6 ++ .long 0x7E082799 ++ .long 0x7E3A2799 ++ addi 4,4,0x30 ++ b .Lctr32_enc8x_done ++ ++.align 5 ++.Lctr32_enc8x_two: ++ .long 0x11EF6D09 ++ .long 0x12107509 ++ ++ vperm 15,15,15,6 ++ vperm 16,16,16,6 ++ .long 0x7DE02799 ++ .long 0x7E082799 ++ addi 4,4,0x20 ++ b .Lctr32_enc8x_done ++ ++.align 5 ++.Lctr32_enc8x_one: ++ .long 0x11EF7509 ++ ++ vperm 15,15,15,6 ++ .long 0x7DE02799 ++ addi 4,4,0x10 ++ ++.Lctr32_enc8x_done: ++ li 10,79 ++ li 11,95 ++ stvx 6,10,1 ++ addi 10,10,32 ++ stvx 6,11,1 ++ addi 11,11,32 ++ stvx 6,10,1 ++ addi 10,10,32 ++ stvx 6,11,1 ++ addi 11,11,32 ++ stvx 6,10,1 ++ addi 10,10,32 ++ stvx 6,11,1 ++ addi 11,11,32 ++ stvx 6,10,1 ++ addi 10,10,32 ++ stvx 6,11,1 ++ addi 11,11,32 ++ ++ or 12,12,12 ++ lvx 20,10,1 ++ addi 10,10,32 ++ lvx 21,11,1 ++ addi 11,11,32 ++ lvx 22,10,1 ++ addi 10,10,32 ++ lvx 23,11,1 ++ addi 11,11,32 ++ lvx 24,10,1 ++ addi 10,10,32 ++ lvx 25,11,1 ++ addi 11,11,32 ++ lvx 26,10,1 ++ addi 10,10,32 ++ lvx 27,11,1 ++ addi 11,11,32 ++ lvx 28,10,1 ++ addi 10,10,32 ++ lvx 29,11,1 ++ addi 11,11,32 ++ lvx 30,10,1 ++ lvx 31,11,1 ++ ld 26,400(1) ++ ld 27,408(1) ++ ld 28,416(1) ++ ld 29,424(1) ++ ld 30,432(1) ++ ld 31,440(1) ++ addi 1,1,448 ++ blr ++.long 0 ++.byte 0,12,0x04,0,0x80,6,6,0 ++.long 0 ++.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks ++.globl aes_hw_xts_encrypt ++.type aes_hw_xts_encrypt,@function ++.align 5 ++aes_hw_xts_encrypt: ++.localentry aes_hw_xts_encrypt,0 ++ ++ mr 10,3 ++ li 3,-1 ++ cmpldi 5,16 ++ .long 0x4dc00020 ++ ++ lis 
0,0xfff0 ++ li 12,-1 ++ li 11,0 ++ or 0,0,0 ++ ++ vspltisb 9,0x07 ++ lvsl 6,11,11 ++ vspltisb 11,0x0f ++ vxor 6,6,9 ++ ++ li 3,15 ++ lvx 8,0,8 ++ lvsl 5,0,8 ++ lvx 4,3,8 ++ vxor 5,5,11 ++ vperm 8,8,4,5 ++ ++ neg 11,10 ++ lvsr 5,0,11 ++ lvx 2,0,10 ++ addi 10,10,15 ++ vxor 5,5,11 ++ ++ cmpldi 7,0 ++ beq .Lxts_enc_no_key2 ++ ++ lvsr 7,0,7 ++ lwz 9,240(7) ++ srwi 9,9,1 ++ subi 9,9,1 ++ li 3,16 ++ ++ lvx 0,0,7 ++ lvx 1,3,7 ++ addi 3,3,16 ++ vperm 0,1,0,7 ++ vxor 8,8,0 ++ lvx 0,3,7 ++ addi 3,3,16 ++ mtctr 9 ++ ++.Ltweak_xts_enc: ++ vperm 1,0,1,7 ++ .long 0x11080D08 ++ lvx 1,3,7 ++ addi 3,3,16 ++ vperm 0,1,0,7 ++ .long 0x11080508 ++ lvx 0,3,7 ++ addi 3,3,16 ++ bdnz .Ltweak_xts_enc ++ ++ vperm 1,0,1,7 ++ .long 0x11080D08 ++ lvx 1,3,7 ++ vperm 0,1,0,7 ++ .long 0x11080509 ++ ++ li 8,0 ++ b .Lxts_enc ++ ++.Lxts_enc_no_key2: ++ li 3,-16 ++ and 5,5,3 ++ ++ ++.Lxts_enc: ++ lvx 4,0,10 ++ addi 10,10,16 ++ ++ lvsr 7,0,6 ++ lwz 9,240(6) ++ srwi 9,9,1 ++ subi 9,9,1 ++ li 3,16 ++ ++ vslb 10,9,9 ++ vor 10,10,9 ++ vspltisb 11,1 ++ vsldoi 10,10,11,15 ++ ++ cmpldi 5,96 ++ bge _aesp8_xts_encrypt6x ++ ++ andi. 7,5,15 ++ subic 0,5,32 ++ subi 7,7,16 ++ subfe 0,0,0 ++ and 0,0,7 ++ add 10,10,0 ++ ++ lvx 0,0,6 ++ lvx 1,3,6 ++ addi 3,3,16 ++ vperm 2,2,4,5 ++ vperm 0,1,0,7 ++ vxor 2,2,8 ++ vxor 2,2,0 ++ lvx 0,3,6 ++ addi 3,3,16 ++ mtctr 9 ++ b .Loop_xts_enc ++ ++.align 5 ++.Loop_xts_enc: ++ vperm 1,0,1,7 ++ .long 0x10420D08 ++ lvx 1,3,6 ++ addi 3,3,16 ++ vperm 0,1,0,7 ++ .long 0x10420508 ++ lvx 0,3,6 ++ addi 3,3,16 ++ bdnz .Loop_xts_enc ++ ++ vperm 1,0,1,7 ++ .long 0x10420D08 ++ lvx 1,3,6 ++ li 3,16 ++ vperm 0,1,0,7 ++ vxor 0,0,8 ++ .long 0x10620509 ++ ++ vperm 11,3,3,6 ++ ++ .long 0x7D602799 ++ ++ addi 4,4,16 ++ ++ subic. 
5,5,16 ++ beq .Lxts_enc_done ++ ++ vor 2,4,4 ++ lvx 4,0,10 ++ addi 10,10,16 ++ lvx 0,0,6 ++ lvx 1,3,6 ++ addi 3,3,16 ++ ++ subic 0,5,32 ++ subfe 0,0,0 ++ and 0,0,7 ++ add 10,10,0 ++ ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vand 11,11,10 ++ vxor 8,8,11 ++ ++ vperm 2,2,4,5 ++ vperm 0,1,0,7 ++ vxor 2,2,8 ++ vxor 3,3,0 ++ vxor 2,2,0 ++ lvx 0,3,6 ++ addi 3,3,16 ++ ++ mtctr 9 ++ cmpldi 5,16 ++ bge .Loop_xts_enc ++ ++ vxor 3,3,8 ++ lvsr 5,0,5 ++ vxor 4,4,4 ++ vspltisb 11,-1 ++ vperm 4,4,11,5 ++ vsel 2,2,3,4 ++ ++ subi 11,4,17 ++ subi 4,4,16 ++ mtctr 5 ++ li 5,16 ++.Loop_xts_enc_steal: ++ lbzu 0,1(11) ++ stb 0,16(11) ++ bdnz .Loop_xts_enc_steal ++ ++ mtctr 9 ++ b .Loop_xts_enc ++ ++.Lxts_enc_done: ++ cmpldi 8,0 ++ beq .Lxts_enc_ret ++ ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vand 11,11,10 ++ vxor 8,8,11 ++ ++ vperm 8,8,8,6 ++ .long 0x7D004799 ++ ++.Lxts_enc_ret: ++ or 12,12,12 ++ li 3,0 ++ blr ++.long 0 ++.byte 0,12,0x04,0,0x80,6,6,0 ++.long 0 ++.size aes_hw_xts_encrypt,.-aes_hw_xts_encrypt ++ ++.globl aes_hw_xts_decrypt ++.type aes_hw_xts_decrypt,@function ++.align 5 ++aes_hw_xts_decrypt: ++.localentry aes_hw_xts_decrypt,0 ++ ++ mr 10,3 ++ li 3,-1 ++ cmpldi 5,16 ++ .long 0x4dc00020 ++ ++ lis 0,0xfff8 ++ li 12,-1 ++ li 11,0 ++ or 0,0,0 ++ ++ andi. 0,5,15 ++ neg 0,0 ++ andi. 
0,0,16 ++ sub 5,5,0 ++ ++ vspltisb 9,0x07 ++ lvsl 6,11,11 ++ vspltisb 11,0x0f ++ vxor 6,6,9 ++ ++ li 3,15 ++ lvx 8,0,8 ++ lvsl 5,0,8 ++ lvx 4,3,8 ++ vxor 5,5,11 ++ vperm 8,8,4,5 ++ ++ neg 11,10 ++ lvsr 5,0,11 ++ lvx 2,0,10 ++ addi 10,10,15 ++ vxor 5,5,11 ++ ++ cmpldi 7,0 ++ beq .Lxts_dec_no_key2 ++ ++ lvsr 7,0,7 ++ lwz 9,240(7) ++ srwi 9,9,1 ++ subi 9,9,1 ++ li 3,16 ++ ++ lvx 0,0,7 ++ lvx 1,3,7 ++ addi 3,3,16 ++ vperm 0,1,0,7 ++ vxor 8,8,0 ++ lvx 0,3,7 ++ addi 3,3,16 ++ mtctr 9 ++ ++.Ltweak_xts_dec: ++ vperm 1,0,1,7 ++ .long 0x11080D08 ++ lvx 1,3,7 ++ addi 3,3,16 ++ vperm 0,1,0,7 ++ .long 0x11080508 ++ lvx 0,3,7 ++ addi 3,3,16 ++ bdnz .Ltweak_xts_dec ++ ++ vperm 1,0,1,7 ++ .long 0x11080D08 ++ lvx 1,3,7 ++ vperm 0,1,0,7 ++ .long 0x11080509 ++ ++ li 8,0 ++ b .Lxts_dec ++ ++.Lxts_dec_no_key2: ++ neg 3,5 ++ andi. 3,3,15 ++ add 5,5,3 ++ ++ ++.Lxts_dec: ++ lvx 4,0,10 ++ addi 10,10,16 ++ ++ lvsr 7,0,6 ++ lwz 9,240(6) ++ srwi 9,9,1 ++ subi 9,9,1 ++ li 3,16 ++ ++ vslb 10,9,9 ++ vor 10,10,9 ++ vspltisb 11,1 ++ vsldoi 10,10,11,15 ++ ++ cmpldi 5,96 ++ bge _aesp8_xts_decrypt6x ++ ++ lvx 0,0,6 ++ lvx 1,3,6 ++ addi 3,3,16 ++ vperm 2,2,4,5 ++ vperm 0,1,0,7 ++ vxor 2,2,8 ++ vxor 2,2,0 ++ lvx 0,3,6 ++ addi 3,3,16 ++ mtctr 9 ++ ++ cmpldi 5,16 ++ blt .Ltail_xts_dec ++ ++ ++.align 5 ++.Loop_xts_dec: ++ vperm 1,0,1,7 ++ .long 0x10420D48 ++ lvx 1,3,6 ++ addi 3,3,16 ++ vperm 0,1,0,7 ++ .long 0x10420548 ++ lvx 0,3,6 ++ addi 3,3,16 ++ bdnz .Loop_xts_dec ++ ++ vperm 1,0,1,7 ++ .long 0x10420D48 ++ lvx 1,3,6 ++ li 3,16 ++ vperm 0,1,0,7 ++ vxor 0,0,8 ++ .long 0x10620549 ++ ++ vperm 11,3,3,6 ++ ++ .long 0x7D602799 ++ ++ addi 4,4,16 ++ ++ subic. 
5,5,16 ++ beq .Lxts_dec_done ++ ++ vor 2,4,4 ++ lvx 4,0,10 ++ addi 10,10,16 ++ lvx 0,0,6 ++ lvx 1,3,6 ++ addi 3,3,16 ++ ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vand 11,11,10 ++ vxor 8,8,11 ++ ++ vperm 2,2,4,5 ++ vperm 0,1,0,7 ++ vxor 2,2,8 ++ vxor 2,2,0 ++ lvx 0,3,6 ++ addi 3,3,16 ++ ++ mtctr 9 ++ cmpldi 5,16 ++ bge .Loop_xts_dec ++ ++.Ltail_xts_dec: ++ vsrab 11,8,9 ++ vaddubm 12,8,8 ++ vsldoi 11,11,11,15 ++ vand 11,11,10 ++ vxor 12,12,11 ++ ++ subi 10,10,16 ++ add 10,10,5 ++ ++ vxor 2,2,8 ++ vxor 2,2,12 ++ ++.Loop_xts_dec_short: ++ vperm 1,0,1,7 ++ .long 0x10420D48 ++ lvx 1,3,6 ++ addi 3,3,16 ++ vperm 0,1,0,7 ++ .long 0x10420548 ++ lvx 0,3,6 ++ addi 3,3,16 ++ bdnz .Loop_xts_dec_short ++ ++ vperm 1,0,1,7 ++ .long 0x10420D48 ++ lvx 1,3,6 ++ li 3,16 ++ vperm 0,1,0,7 ++ vxor 0,0,12 ++ .long 0x10620549 ++ ++ vperm 11,3,3,6 ++ ++ .long 0x7D602799 ++ ++ ++ vor 2,4,4 ++ lvx 4,0,10 ++ ++ lvx 0,0,6 ++ lvx 1,3,6 ++ addi 3,3,16 ++ vperm 2,2,4,5 ++ vperm 0,1,0,7 ++ ++ lvsr 5,0,5 ++ vxor 4,4,4 ++ vspltisb 11,-1 ++ vperm 4,4,11,5 ++ vsel 2,2,3,4 ++ ++ vxor 0,0,8 ++ vxor 2,2,0 ++ lvx 0,3,6 ++ addi 3,3,16 ++ ++ subi 11,4,1 ++ mtctr 5 ++ li 5,16 ++.Loop_xts_dec_steal: ++ lbzu 0,1(11) ++ stb 0,16(11) ++ bdnz .Loop_xts_dec_steal ++ ++ mtctr 9 ++ b .Loop_xts_dec ++ ++.Lxts_dec_done: ++ cmpldi 8,0 ++ beq .Lxts_dec_ret ++ ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vand 11,11,10 ++ vxor 8,8,11 ++ ++ vperm 8,8,8,6 ++ .long 0x7D004799 ++ ++.Lxts_dec_ret: ++ or 12,12,12 ++ li 3,0 ++ blr ++.long 0 ++.byte 0,12,0x04,0,0x80,6,6,0 ++.long 0 ++.size aes_hw_xts_decrypt,.-aes_hw_xts_decrypt ++.align 5 ++_aesp8_xts_encrypt6x: ++ stdu 1,-448(1) ++ mflr 11 ++ li 7,207 ++ li 3,223 ++ std 11,464(1) ++ stvx 20,7,1 ++ addi 7,7,32 ++ stvx 21,3,1 ++ addi 3,3,32 ++ stvx 22,7,1 ++ addi 7,7,32 ++ stvx 23,3,1 ++ addi 3,3,32 ++ stvx 24,7,1 ++ addi 7,7,32 ++ stvx 25,3,1 ++ addi 3,3,32 ++ stvx 26,7,1 ++ addi 7,7,32 ++ stvx 27,3,1 ++ addi 3,3,32 ++ stvx 28,7,1 ++ addi 7,7,32 ++ 
stvx 29,3,1 ++ addi 3,3,32 ++ stvx 30,7,1 ++ stvx 31,3,1 ++ li 0,-1 ++ stw 12,396(1) ++ li 3,0x10 ++ std 26,400(1) ++ li 26,0x20 ++ std 27,408(1) ++ li 27,0x30 ++ std 28,416(1) ++ li 28,0x40 ++ std 29,424(1) ++ li 29,0x50 ++ std 30,432(1) ++ li 30,0x60 ++ std 31,440(1) ++ li 31,0x70 ++ or 0,0,0 ++ ++ subi 9,9,3 ++ ++ lvx 23,0,6 ++ lvx 30,3,6 ++ addi 6,6,0x20 ++ lvx 31,0,6 ++ vperm 23,30,23,7 ++ addi 7,1,79 ++ mtctr 9 ++ ++.Load_xts_enc_key: ++ vperm 24,31,30,7 ++ lvx 30,3,6 ++ addi 6,6,0x20 ++ stvx 24,0,7 ++ vperm 25,30,31,7 ++ lvx 31,0,6 ++ stvx 25,3,7 ++ addi 7,7,0x20 ++ bdnz .Load_xts_enc_key ++ ++ lvx 26,3,6 ++ vperm 24,31,30,7 ++ lvx 27,26,6 ++ stvx 24,0,7 ++ vperm 25,26,31,7 ++ lvx 28,27,6 ++ stvx 25,3,7 ++ addi 7,1,79 ++ vperm 26,27,26,7 ++ lvx 29,28,6 ++ vperm 27,28,27,7 ++ lvx 30,29,6 ++ vperm 28,29,28,7 ++ lvx 31,30,6 ++ vperm 29,30,29,7 ++ lvx 22,31,6 ++ vperm 30,31,30,7 ++ lvx 24,0,7 ++ vperm 31,22,31,7 ++ lvx 25,3,7 ++ ++ vperm 0,2,4,5 ++ subi 10,10,31 ++ vxor 17,8,23 ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vand 11,11,10 ++ vxor 7,0,17 ++ vxor 8,8,11 ++ ++ .long 0x7C235699 ++ vxor 18,8,23 ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vperm 1,1,1,6 ++ vand 11,11,10 ++ vxor 12,1,18 ++ vxor 8,8,11 ++ ++ .long 0x7C5A5699 ++ andi. 
31,5,15 ++ vxor 19,8,23 ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vperm 2,2,2,6 ++ vand 11,11,10 ++ vxor 13,2,19 ++ vxor 8,8,11 ++ ++ .long 0x7C7B5699 ++ sub 5,5,31 ++ vxor 20,8,23 ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vperm 3,3,3,6 ++ vand 11,11,10 ++ vxor 14,3,20 ++ vxor 8,8,11 ++ ++ .long 0x7C9C5699 ++ subi 5,5,0x60 ++ vxor 21,8,23 ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vperm 4,4,4,6 ++ vand 11,11,10 ++ vxor 15,4,21 ++ vxor 8,8,11 ++ ++ .long 0x7CBD5699 ++ addi 10,10,0x60 ++ vxor 22,8,23 ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vperm 5,5,5,6 ++ vand 11,11,10 ++ vxor 16,5,22 ++ vxor 8,8,11 ++ ++ vxor 31,31,23 ++ mtctr 9 ++ b .Loop_xts_enc6x ++ ++.align 5 ++.Loop_xts_enc6x: ++ .long 0x10E7C508 ++ .long 0x118CC508 ++ .long 0x11ADC508 ++ .long 0x11CEC508 ++ .long 0x11EFC508 ++ .long 0x1210C508 ++ lvx 24,26,7 ++ addi 7,7,0x20 ++ ++ .long 0x10E7CD08 ++ .long 0x118CCD08 ++ .long 0x11ADCD08 ++ .long 0x11CECD08 ++ .long 0x11EFCD08 ++ .long 0x1210CD08 ++ lvx 25,3,7 ++ bdnz .Loop_xts_enc6x ++ ++ subic 5,5,96 ++ vxor 0,17,31 ++ .long 0x10E7C508 ++ .long 0x118CC508 ++ vsrab 11,8,9 ++ vxor 17,8,23 ++ vaddubm 8,8,8 ++ .long 0x11ADC508 ++ .long 0x11CEC508 ++ vsldoi 11,11,11,15 ++ .long 0x11EFC508 ++ .long 0x1210C508 ++ ++ subfe. 
0,0,0 ++ vand 11,11,10 ++ .long 0x10E7CD08 ++ .long 0x118CCD08 ++ vxor 8,8,11 ++ .long 0x11ADCD08 ++ .long 0x11CECD08 ++ vxor 1,18,31 ++ vsrab 11,8,9 ++ vxor 18,8,23 ++ .long 0x11EFCD08 ++ .long 0x1210CD08 ++ ++ and 0,0,5 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ .long 0x10E7D508 ++ .long 0x118CD508 ++ vand 11,11,10 ++ .long 0x11ADD508 ++ .long 0x11CED508 ++ vxor 8,8,11 ++ .long 0x11EFD508 ++ .long 0x1210D508 ++ ++ add 10,10,0 ++ ++ ++ ++ vxor 2,19,31 ++ vsrab 11,8,9 ++ vxor 19,8,23 ++ vaddubm 8,8,8 ++ .long 0x10E7DD08 ++ .long 0x118CDD08 ++ vsldoi 11,11,11,15 ++ .long 0x11ADDD08 ++ .long 0x11CEDD08 ++ vand 11,11,10 ++ .long 0x11EFDD08 ++ .long 0x1210DD08 ++ ++ addi 7,1,79 ++ vxor 8,8,11 ++ .long 0x10E7E508 ++ .long 0x118CE508 ++ vxor 3,20,31 ++ vsrab 11,8,9 ++ vxor 20,8,23 ++ .long 0x11ADE508 ++ .long 0x11CEE508 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ .long 0x11EFE508 ++ .long 0x1210E508 ++ lvx 24,0,7 ++ vand 11,11,10 ++ ++ .long 0x10E7ED08 ++ .long 0x118CED08 ++ vxor 8,8,11 ++ .long 0x11ADED08 ++ .long 0x11CEED08 ++ vxor 4,21,31 ++ vsrab 11,8,9 ++ vxor 21,8,23 ++ .long 0x11EFED08 ++ .long 0x1210ED08 ++ lvx 25,3,7 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ ++ .long 0x10E7F508 ++ .long 0x118CF508 ++ vand 11,11,10 ++ .long 0x11ADF508 ++ .long 0x11CEF508 ++ vxor 8,8,11 ++ .long 0x11EFF508 ++ .long 0x1210F508 ++ vxor 5,22,31 ++ vsrab 11,8,9 ++ vxor 22,8,23 ++ ++ .long 0x10E70509 ++ .long 0x7C005699 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ .long 0x118C0D09 ++ .long 0x7C235699 ++ .long 0x11AD1509 ++ vperm 0,0,0,6 ++ .long 0x7C5A5699 ++ vand 11,11,10 ++ .long 0x11CE1D09 ++ vperm 1,1,1,6 ++ .long 0x7C7B5699 ++ .long 0x11EF2509 ++ vperm 2,2,2,6 ++ .long 0x7C9C5699 ++ vxor 8,8,11 ++ .long 0x11702D09 ++ ++ vperm 3,3,3,6 ++ .long 0x7CBD5699 ++ addi 10,10,0x60 ++ vperm 4,4,4,6 ++ vperm 5,5,5,6 ++ ++ vperm 7,7,7,6 ++ vperm 12,12,12,6 ++ .long 0x7CE02799 ++ vxor 7,0,17 ++ vperm 13,13,13,6 ++ .long 0x7D832799 ++ vxor 12,1,18 ++ vperm 14,14,14,6 ++ .long 0x7DBA2799 ++ vxor 
13,2,19 ++ vperm 15,15,15,6 ++ .long 0x7DDB2799 ++ vxor 14,3,20 ++ vperm 16,11,11,6 ++ .long 0x7DFC2799 ++ vxor 15,4,21 ++ .long 0x7E1D2799 ++ ++ vxor 16,5,22 ++ addi 4,4,0x60 ++ ++ mtctr 9 ++ beq .Loop_xts_enc6x ++ ++ addic. 5,5,0x60 ++ beq .Lxts_enc6x_zero ++ cmpwi 5,0x20 ++ blt .Lxts_enc6x_one ++ nop ++ beq .Lxts_enc6x_two ++ cmpwi 5,0x40 ++ blt .Lxts_enc6x_three ++ nop ++ beq .Lxts_enc6x_four ++ ++.Lxts_enc6x_five: ++ vxor 7,1,17 ++ vxor 12,2,18 ++ vxor 13,3,19 ++ vxor 14,4,20 ++ vxor 15,5,21 ++ ++ bl _aesp8_xts_enc5x ++ ++ vperm 7,7,7,6 ++ vor 17,22,22 ++ vperm 12,12,12,6 ++ .long 0x7CE02799 ++ vperm 13,13,13,6 ++ .long 0x7D832799 ++ vperm 14,14,14,6 ++ .long 0x7DBA2799 ++ vxor 11,15,22 ++ vperm 15,15,15,6 ++ .long 0x7DDB2799 ++ .long 0x7DFC2799 ++ addi 4,4,0x50 ++ bne .Lxts_enc6x_steal ++ b .Lxts_enc6x_done ++ ++.align 4 ++.Lxts_enc6x_four: ++ vxor 7,2,17 ++ vxor 12,3,18 ++ vxor 13,4,19 ++ vxor 14,5,20 ++ vxor 15,15,15 ++ ++ bl _aesp8_xts_enc5x ++ ++ vperm 7,7,7,6 ++ vor 17,21,21 ++ vperm 12,12,12,6 ++ .long 0x7CE02799 ++ vperm 13,13,13,6 ++ .long 0x7D832799 ++ vxor 11,14,21 ++ vperm 14,14,14,6 ++ .long 0x7DBA2799 ++ .long 0x7DDB2799 ++ addi 4,4,0x40 ++ bne .Lxts_enc6x_steal ++ b .Lxts_enc6x_done ++ ++.align 4 ++.Lxts_enc6x_three: ++ vxor 7,3,17 ++ vxor 12,4,18 ++ vxor 13,5,19 ++ vxor 14,14,14 ++ vxor 15,15,15 ++ ++ bl _aesp8_xts_enc5x ++ ++ vperm 7,7,7,6 ++ vor 17,20,20 ++ vperm 12,12,12,6 ++ .long 0x7CE02799 ++ vxor 11,13,20 ++ vperm 13,13,13,6 ++ .long 0x7D832799 ++ .long 0x7DBA2799 ++ addi 4,4,0x30 ++ bne .Lxts_enc6x_steal ++ b .Lxts_enc6x_done ++ ++.align 4 ++.Lxts_enc6x_two: ++ vxor 7,4,17 ++ vxor 12,5,18 ++ vxor 13,13,13 ++ vxor 14,14,14 ++ vxor 15,15,15 ++ ++ bl _aesp8_xts_enc5x ++ ++ vperm 7,7,7,6 ++ vor 17,19,19 ++ vxor 11,12,19 ++ vperm 12,12,12,6 ++ .long 0x7CE02799 ++ .long 0x7D832799 ++ addi 4,4,0x20 ++ bne .Lxts_enc6x_steal ++ b .Lxts_enc6x_done ++ ++.align 4 ++.Lxts_enc6x_one: ++ vxor 7,5,17 ++ nop ++.Loop_xts_enc1x: ++ .long 0x10E7C508 ++ lvx 
24,26,7 ++ addi 7,7,0x20 ++ ++ .long 0x10E7CD08 ++ lvx 25,3,7 ++ bdnz .Loop_xts_enc1x ++ ++ add 10,10,31 ++ cmpwi 31,0 ++ .long 0x10E7C508 ++ ++ subi 10,10,16 ++ .long 0x10E7CD08 ++ ++ lvsr 5,0,31 ++ .long 0x10E7D508 ++ ++ .long 0x7C005699 ++ .long 0x10E7DD08 ++ ++ addi 7,1,79 ++ .long 0x10E7E508 ++ lvx 24,0,7 ++ ++ .long 0x10E7ED08 ++ lvx 25,3,7 ++ vxor 17,17,31 ++ ++ vperm 0,0,0,6 ++ .long 0x10E7F508 ++ ++ vperm 0,0,0,5 ++ .long 0x10E78D09 ++ ++ vor 17,18,18 ++ vxor 11,7,18 ++ vperm 7,7,7,6 ++ .long 0x7CE02799 ++ addi 4,4,0x10 ++ bne .Lxts_enc6x_steal ++ b .Lxts_enc6x_done ++ ++.align 4 ++.Lxts_enc6x_zero: ++ cmpwi 31,0 ++ beq .Lxts_enc6x_done ++ ++ add 10,10,31 ++ subi 10,10,16 ++ .long 0x7C005699 ++ lvsr 5,0,31 ++ vperm 0,0,0,6 ++ vperm 0,0,0,5 ++ vxor 11,11,17 ++.Lxts_enc6x_steal: ++ vxor 0,0,17 ++ vxor 7,7,7 ++ vspltisb 12,-1 ++ vperm 7,7,12,5 ++ vsel 7,0,11,7 ++ ++ subi 30,4,17 ++ subi 4,4,16 ++ mtctr 31 ++.Loop_xts_enc6x_steal: ++ lbzu 0,1(30) ++ stb 0,16(30) ++ bdnz .Loop_xts_enc6x_steal ++ ++ li 31,0 ++ mtctr 9 ++ b .Loop_xts_enc1x ++ ++.align 4 ++.Lxts_enc6x_done: ++ cmpldi 8,0 ++ beq .Lxts_enc6x_ret ++ ++ vxor 8,17,23 ++ vperm 8,8,8,6 ++ .long 0x7D004799 ++ ++.Lxts_enc6x_ret: ++ mtlr 11 ++ li 10,79 ++ li 11,95 ++ stvx 9,10,1 ++ addi 10,10,32 ++ stvx 9,11,1 ++ addi 11,11,32 ++ stvx 9,10,1 ++ addi 10,10,32 ++ stvx 9,11,1 ++ addi 11,11,32 ++ stvx 9,10,1 ++ addi 10,10,32 ++ stvx 9,11,1 ++ addi 11,11,32 ++ stvx 9,10,1 ++ addi 10,10,32 ++ stvx 9,11,1 ++ addi 11,11,32 ++ ++ or 12,12,12 ++ lvx 20,10,1 ++ addi 10,10,32 ++ lvx 21,11,1 ++ addi 11,11,32 ++ lvx 22,10,1 ++ addi 10,10,32 ++ lvx 23,11,1 ++ addi 11,11,32 ++ lvx 24,10,1 ++ addi 10,10,32 ++ lvx 25,11,1 ++ addi 11,11,32 ++ lvx 26,10,1 ++ addi 10,10,32 ++ lvx 27,11,1 ++ addi 11,11,32 ++ lvx 28,10,1 ++ addi 10,10,32 ++ lvx 29,11,1 ++ addi 11,11,32 ++ lvx 30,10,1 ++ lvx 31,11,1 ++ ld 26,400(1) ++ ld 27,408(1) ++ ld 28,416(1) ++ ld 29,424(1) ++ ld 30,432(1) ++ ld 31,440(1) ++ addi 1,1,448 ++ blr ++.long 0 
++.byte 0,12,0x04,1,0x80,6,6,0 ++.long 0 ++ ++.align 5 ++_aesp8_xts_enc5x: ++ .long 0x10E7C508 ++ .long 0x118CC508 ++ .long 0x11ADC508 ++ .long 0x11CEC508 ++ .long 0x11EFC508 ++ lvx 24,26,7 ++ addi 7,7,0x20 ++ ++ .long 0x10E7CD08 ++ .long 0x118CCD08 ++ .long 0x11ADCD08 ++ .long 0x11CECD08 ++ .long 0x11EFCD08 ++ lvx 25,3,7 ++ bdnz _aesp8_xts_enc5x ++ ++ add 10,10,31 ++ cmpwi 31,0 ++ .long 0x10E7C508 ++ .long 0x118CC508 ++ .long 0x11ADC508 ++ .long 0x11CEC508 ++ .long 0x11EFC508 ++ ++ subi 10,10,16 ++ .long 0x10E7CD08 ++ .long 0x118CCD08 ++ .long 0x11ADCD08 ++ .long 0x11CECD08 ++ .long 0x11EFCD08 ++ vxor 17,17,31 ++ ++ .long 0x10E7D508 ++ lvsr 5,0,31 ++ .long 0x118CD508 ++ .long 0x11ADD508 ++ .long 0x11CED508 ++ .long 0x11EFD508 ++ vxor 1,18,31 ++ ++ .long 0x10E7DD08 ++ .long 0x7C005699 ++ .long 0x118CDD08 ++ .long 0x11ADDD08 ++ .long 0x11CEDD08 ++ .long 0x11EFDD08 ++ vxor 2,19,31 ++ ++ addi 7,1,79 ++ .long 0x10E7E508 ++ .long 0x118CE508 ++ .long 0x11ADE508 ++ .long 0x11CEE508 ++ .long 0x11EFE508 ++ lvx 24,0,7 ++ vxor 3,20,31 ++ ++ .long 0x10E7ED08 ++ vperm 0,0,0,6 ++ .long 0x118CED08 ++ .long 0x11ADED08 ++ .long 0x11CEED08 ++ .long 0x11EFED08 ++ lvx 25,3,7 ++ vxor 4,21,31 ++ ++ .long 0x10E7F508 ++ vperm 0,0,0,5 ++ .long 0x118CF508 ++ .long 0x11ADF508 ++ .long 0x11CEF508 ++ .long 0x11EFF508 ++ ++ .long 0x10E78D09 ++ .long 0x118C0D09 ++ .long 0x11AD1509 ++ .long 0x11CE1D09 ++ .long 0x11EF2509 ++ blr ++.long 0 ++.byte 0,12,0x14,0,0,0,0,0 ++ ++.align 5 ++_aesp8_xts_decrypt6x: ++ stdu 1,-448(1) ++ mflr 11 ++ li 7,207 ++ li 3,223 ++ std 11,464(1) ++ stvx 20,7,1 ++ addi 7,7,32 ++ stvx 21,3,1 ++ addi 3,3,32 ++ stvx 22,7,1 ++ addi 7,7,32 ++ stvx 23,3,1 ++ addi 3,3,32 ++ stvx 24,7,1 ++ addi 7,7,32 ++ stvx 25,3,1 ++ addi 3,3,32 ++ stvx 26,7,1 ++ addi 7,7,32 ++ stvx 27,3,1 ++ addi 3,3,32 ++ stvx 28,7,1 ++ addi 7,7,32 ++ stvx 29,3,1 ++ addi 3,3,32 ++ stvx 30,7,1 ++ stvx 31,3,1 ++ li 0,-1 ++ stw 12,396(1) ++ li 3,0x10 ++ std 26,400(1) ++ li 26,0x20 ++ std 27,408(1) ++ li 27,0x30 
++ std 28,416(1) ++ li 28,0x40 ++ std 29,424(1) ++ li 29,0x50 ++ std 30,432(1) ++ li 30,0x60 ++ std 31,440(1) ++ li 31,0x70 ++ or 0,0,0 ++ ++ subi 9,9,3 ++ ++ lvx 23,0,6 ++ lvx 30,3,6 ++ addi 6,6,0x20 ++ lvx 31,0,6 ++ vperm 23,30,23,7 ++ addi 7,1,79 ++ mtctr 9 ++ ++.Load_xts_dec_key: ++ vperm 24,31,30,7 ++ lvx 30,3,6 ++ addi 6,6,0x20 ++ stvx 24,0,7 ++ vperm 25,30,31,7 ++ lvx 31,0,6 ++ stvx 25,3,7 ++ addi 7,7,0x20 ++ bdnz .Load_xts_dec_key ++ ++ lvx 26,3,6 ++ vperm 24,31,30,7 ++ lvx 27,26,6 ++ stvx 24,0,7 ++ vperm 25,26,31,7 ++ lvx 28,27,6 ++ stvx 25,3,7 ++ addi 7,1,79 ++ vperm 26,27,26,7 ++ lvx 29,28,6 ++ vperm 27,28,27,7 ++ lvx 30,29,6 ++ vperm 28,29,28,7 ++ lvx 31,30,6 ++ vperm 29,30,29,7 ++ lvx 22,31,6 ++ vperm 30,31,30,7 ++ lvx 24,0,7 ++ vperm 31,22,31,7 ++ lvx 25,3,7 ++ ++ vperm 0,2,4,5 ++ subi 10,10,31 ++ vxor 17,8,23 ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vand 11,11,10 ++ vxor 7,0,17 ++ vxor 8,8,11 ++ ++ .long 0x7C235699 ++ vxor 18,8,23 ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vperm 1,1,1,6 ++ vand 11,11,10 ++ vxor 12,1,18 ++ vxor 8,8,11 ++ ++ .long 0x7C5A5699 ++ andi. 
31,5,15 ++ vxor 19,8,23 ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vperm 2,2,2,6 ++ vand 11,11,10 ++ vxor 13,2,19 ++ vxor 8,8,11 ++ ++ .long 0x7C7B5699 ++ sub 5,5,31 ++ vxor 20,8,23 ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vperm 3,3,3,6 ++ vand 11,11,10 ++ vxor 14,3,20 ++ vxor 8,8,11 ++ ++ .long 0x7C9C5699 ++ subi 5,5,0x60 ++ vxor 21,8,23 ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vperm 4,4,4,6 ++ vand 11,11,10 ++ vxor 15,4,21 ++ vxor 8,8,11 ++ ++ .long 0x7CBD5699 ++ addi 10,10,0x60 ++ vxor 22,8,23 ++ vsrab 11,8,9 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ vperm 5,5,5,6 ++ vand 11,11,10 ++ vxor 16,5,22 ++ vxor 8,8,11 ++ ++ vxor 31,31,23 ++ mtctr 9 ++ b .Loop_xts_dec6x ++ ++.align 5 ++.Loop_xts_dec6x: ++ .long 0x10E7C548 ++ .long 0x118CC548 ++ .long 0x11ADC548 ++ .long 0x11CEC548 ++ .long 0x11EFC548 ++ .long 0x1210C548 ++ lvx 24,26,7 ++ addi 7,7,0x20 ++ ++ .long 0x10E7CD48 ++ .long 0x118CCD48 ++ .long 0x11ADCD48 ++ .long 0x11CECD48 ++ .long 0x11EFCD48 ++ .long 0x1210CD48 ++ lvx 25,3,7 ++ bdnz .Loop_xts_dec6x ++ ++ subic 5,5,96 ++ vxor 0,17,31 ++ .long 0x10E7C548 ++ .long 0x118CC548 ++ vsrab 11,8,9 ++ vxor 17,8,23 ++ vaddubm 8,8,8 ++ .long 0x11ADC548 ++ .long 0x11CEC548 ++ vsldoi 11,11,11,15 ++ .long 0x11EFC548 ++ .long 0x1210C548 ++ ++ subfe. 
0,0,0 ++ vand 11,11,10 ++ .long 0x10E7CD48 ++ .long 0x118CCD48 ++ vxor 8,8,11 ++ .long 0x11ADCD48 ++ .long 0x11CECD48 ++ vxor 1,18,31 ++ vsrab 11,8,9 ++ vxor 18,8,23 ++ .long 0x11EFCD48 ++ .long 0x1210CD48 ++ ++ and 0,0,5 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ .long 0x10E7D548 ++ .long 0x118CD548 ++ vand 11,11,10 ++ .long 0x11ADD548 ++ .long 0x11CED548 ++ vxor 8,8,11 ++ .long 0x11EFD548 ++ .long 0x1210D548 ++ ++ add 10,10,0 ++ ++ ++ ++ vxor 2,19,31 ++ vsrab 11,8,9 ++ vxor 19,8,23 ++ vaddubm 8,8,8 ++ .long 0x10E7DD48 ++ .long 0x118CDD48 ++ vsldoi 11,11,11,15 ++ .long 0x11ADDD48 ++ .long 0x11CEDD48 ++ vand 11,11,10 ++ .long 0x11EFDD48 ++ .long 0x1210DD48 ++ ++ addi 7,1,79 ++ vxor 8,8,11 ++ .long 0x10E7E548 ++ .long 0x118CE548 ++ vxor 3,20,31 ++ vsrab 11,8,9 ++ vxor 20,8,23 ++ .long 0x11ADE548 ++ .long 0x11CEE548 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ .long 0x11EFE548 ++ .long 0x1210E548 ++ lvx 24,0,7 ++ vand 11,11,10 ++ ++ .long 0x10E7ED48 ++ .long 0x118CED48 ++ vxor 8,8,11 ++ .long 0x11ADED48 ++ .long 0x11CEED48 ++ vxor 4,21,31 ++ vsrab 11,8,9 ++ vxor 21,8,23 ++ .long 0x11EFED48 ++ .long 0x1210ED48 ++ lvx 25,3,7 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ ++ .long 0x10E7F548 ++ .long 0x118CF548 ++ vand 11,11,10 ++ .long 0x11ADF548 ++ .long 0x11CEF548 ++ vxor 8,8,11 ++ .long 0x11EFF548 ++ .long 0x1210F548 ++ vxor 5,22,31 ++ vsrab 11,8,9 ++ vxor 22,8,23 ++ ++ .long 0x10E70549 ++ .long 0x7C005699 ++ vaddubm 8,8,8 ++ vsldoi 11,11,11,15 ++ .long 0x118C0D49 ++ .long 0x7C235699 ++ .long 0x11AD1549 ++ vperm 0,0,0,6 ++ .long 0x7C5A5699 ++ vand 11,11,10 ++ .long 0x11CE1D49 ++ vperm 1,1,1,6 ++ .long 0x7C7B5699 ++ .long 0x11EF2549 ++ vperm 2,2,2,6 ++ .long 0x7C9C5699 ++ vxor 8,8,11 ++ .long 0x12102D49 ++ vperm 3,3,3,6 ++ .long 0x7CBD5699 ++ addi 10,10,0x60 ++ vperm 4,4,4,6 ++ vperm 5,5,5,6 ++ ++ vperm 7,7,7,6 ++ vperm 12,12,12,6 ++ .long 0x7CE02799 ++ vxor 7,0,17 ++ vperm 13,13,13,6 ++ .long 0x7D832799 ++ vxor 12,1,18 ++ vperm 14,14,14,6 ++ .long 0x7DBA2799 ++ vxor 13,2,19 
++ vperm 15,15,15,6 ++ .long 0x7DDB2799 ++ vxor 14,3,20 ++ vperm 16,16,16,6 ++ .long 0x7DFC2799 ++ vxor 15,4,21 ++ .long 0x7E1D2799 ++ vxor 16,5,22 ++ addi 4,4,0x60 ++ ++ mtctr 9 ++ beq .Loop_xts_dec6x ++ ++ addic. 5,5,0x60 ++ beq .Lxts_dec6x_zero ++ cmpwi 5,0x20 ++ blt .Lxts_dec6x_one ++ nop ++ beq .Lxts_dec6x_two ++ cmpwi 5,0x40 ++ blt .Lxts_dec6x_three ++ nop ++ beq .Lxts_dec6x_four ++ ++.Lxts_dec6x_five: ++ vxor 7,1,17 ++ vxor 12,2,18 ++ vxor 13,3,19 ++ vxor 14,4,20 ++ vxor 15,5,21 ++ ++ bl _aesp8_xts_dec5x ++ ++ vperm 7,7,7,6 ++ vor 17,22,22 ++ vxor 18,8,23 ++ vperm 12,12,12,6 ++ .long 0x7CE02799 ++ vxor 7,0,18 ++ vperm 13,13,13,6 ++ .long 0x7D832799 ++ vperm 14,14,14,6 ++ .long 0x7DBA2799 ++ vperm 15,15,15,6 ++ .long 0x7DDB2799 ++ .long 0x7DFC2799 ++ addi 4,4,0x50 ++ bne .Lxts_dec6x_steal ++ b .Lxts_dec6x_done ++ ++.align 4 ++.Lxts_dec6x_four: ++ vxor 7,2,17 ++ vxor 12,3,18 ++ vxor 13,4,19 ++ vxor 14,5,20 ++ vxor 15,15,15 ++ ++ bl _aesp8_xts_dec5x ++ ++ vperm 7,7,7,6 ++ vor 17,21,21 ++ vor 18,22,22 ++ vperm 12,12,12,6 ++ .long 0x7CE02799 ++ vxor 7,0,22 ++ vperm 13,13,13,6 ++ .long 0x7D832799 ++ vperm 14,14,14,6 ++ .long 0x7DBA2799 ++ .long 0x7DDB2799 ++ addi 4,4,0x40 ++ bne .Lxts_dec6x_steal ++ b .Lxts_dec6x_done ++ ++.align 4 ++.Lxts_dec6x_three: ++ vxor 7,3,17 ++ vxor 12,4,18 ++ vxor 13,5,19 ++ vxor 14,14,14 ++ vxor 15,15,15 ++ ++ bl _aesp8_xts_dec5x ++ ++ vperm 7,7,7,6 ++ vor 17,20,20 ++ vor 18,21,21 ++ vperm 12,12,12,6 ++ .long 0x7CE02799 ++ vxor 7,0,21 ++ vperm 13,13,13,6 ++ .long 0x7D832799 ++ .long 0x7DBA2799 ++ addi 4,4,0x30 ++ bne .Lxts_dec6x_steal ++ b .Lxts_dec6x_done ++ ++.align 4 ++.Lxts_dec6x_two: ++ vxor 7,4,17 ++ vxor 12,5,18 ++ vxor 13,13,13 ++ vxor 14,14,14 ++ vxor 15,15,15 ++ ++ bl _aesp8_xts_dec5x ++ ++ vperm 7,7,7,6 ++ vor 17,19,19 ++ vor 18,20,20 ++ vperm 12,12,12,6 ++ .long 0x7CE02799 ++ vxor 7,0,20 ++ .long 0x7D832799 ++ addi 4,4,0x20 ++ bne .Lxts_dec6x_steal ++ b .Lxts_dec6x_done ++ ++.align 4 ++.Lxts_dec6x_one: ++ vxor 7,5,17 ++ nop 
++.Loop_xts_dec1x: ++ .long 0x10E7C548 ++ lvx 24,26,7 ++ addi 7,7,0x20 ++ ++ .long 0x10E7CD48 ++ lvx 25,3,7 ++ bdnz .Loop_xts_dec1x ++ ++ subi 0,31,1 ++ .long 0x10E7C548 ++ ++ andi. 0,0,16 ++ cmpwi 31,0 ++ .long 0x10E7CD48 ++ ++ sub 10,10,0 ++ .long 0x10E7D548 ++ ++ .long 0x7C005699 ++ .long 0x10E7DD48 ++ ++ addi 7,1,79 ++ .long 0x10E7E548 ++ lvx 24,0,7 ++ ++ .long 0x10E7ED48 ++ lvx 25,3,7 ++ vxor 17,17,31 ++ ++ vperm 0,0,0,6 ++ .long 0x10E7F548 ++ ++ mtctr 9 ++ .long 0x10E78D49 ++ ++ vor 17,18,18 ++ vor 18,19,19 ++ vperm 7,7,7,6 ++ .long 0x7CE02799 ++ addi 4,4,0x10 ++ vxor 7,0,19 ++ bne .Lxts_dec6x_steal ++ b .Lxts_dec6x_done ++ ++.align 4 ++.Lxts_dec6x_zero: ++ cmpwi 31,0 ++ beq .Lxts_dec6x_done ++ ++ .long 0x7C005699 ++ vperm 0,0,0,6 ++ vxor 7,0,18 ++.Lxts_dec6x_steal: ++ .long 0x10E7C548 ++ lvx 24,26,7 ++ addi 7,7,0x20 ++ ++ .long 0x10E7CD48 ++ lvx 25,3,7 ++ bdnz .Lxts_dec6x_steal ++ ++ add 10,10,31 ++ .long 0x10E7C548 ++ ++ cmpwi 31,0 ++ .long 0x10E7CD48 ++ ++ .long 0x7C005699 ++ .long 0x10E7D548 ++ ++ lvsr 5,0,31 ++ .long 0x10E7DD48 ++ ++ addi 7,1,79 ++ .long 0x10E7E548 ++ lvx 24,0,7 ++ ++ .long 0x10E7ED48 ++ lvx 25,3,7 ++ vxor 18,18,31 ++ ++ vperm 0,0,0,6 ++ .long 0x10E7F548 ++ ++ vperm 0,0,0,5 ++ .long 0x11679549 ++ ++ vperm 7,11,11,6 ++ .long 0x7CE02799 ++ ++ ++ vxor 7,7,7 ++ vspltisb 12,-1 ++ vperm 7,7,12,5 ++ vsel 7,0,11,7 ++ vxor 7,7,17 ++ ++ subi 30,4,1 ++ mtctr 31 ++.Loop_xts_dec6x_steal: ++ lbzu 0,1(30) ++ stb 0,16(30) ++ bdnz .Loop_xts_dec6x_steal ++ ++ li 31,0 ++ mtctr 9 ++ b .Loop_xts_dec1x ++ ++.align 4 ++.Lxts_dec6x_done: ++ cmpldi 8,0 ++ beq .Lxts_dec6x_ret ++ ++ vxor 8,17,23 ++ vperm 8,8,8,6 ++ .long 0x7D004799 ++ ++.Lxts_dec6x_ret: ++ mtlr 11 ++ li 10,79 ++ li 11,95 ++ stvx 9,10,1 ++ addi 10,10,32 ++ stvx 9,11,1 ++ addi 11,11,32 ++ stvx 9,10,1 ++ addi 10,10,32 ++ stvx 9,11,1 ++ addi 11,11,32 ++ stvx 9,10,1 ++ addi 10,10,32 ++ stvx 9,11,1 ++ addi 11,11,32 ++ stvx 9,10,1 ++ addi 10,10,32 ++ stvx 9,11,1 ++ addi 11,11,32 ++ ++ or 12,12,12 ++ lvx 
20,10,1 ++ addi 10,10,32 ++ lvx 21,11,1 ++ addi 11,11,32 ++ lvx 22,10,1 ++ addi 10,10,32 ++ lvx 23,11,1 ++ addi 11,11,32 ++ lvx 24,10,1 ++ addi 10,10,32 ++ lvx 25,11,1 ++ addi 11,11,32 ++ lvx 26,10,1 ++ addi 10,10,32 ++ lvx 27,11,1 ++ addi 11,11,32 ++ lvx 28,10,1 ++ addi 10,10,32 ++ lvx 29,11,1 ++ addi 11,11,32 ++ lvx 30,10,1 ++ lvx 31,11,1 ++ ld 26,400(1) ++ ld 27,408(1) ++ ld 28,416(1) ++ ld 29,424(1) ++ ld 30,432(1) ++ ld 31,440(1) ++ addi 1,1,448 ++ blr ++.long 0 ++.byte 0,12,0x04,1,0x80,6,6,0 ++.long 0 ++ ++.align 5 ++_aesp8_xts_dec5x: ++ .long 0x10E7C548 ++ .long 0x118CC548 ++ .long 0x11ADC548 ++ .long 0x11CEC548 ++ .long 0x11EFC548 ++ lvx 24,26,7 ++ addi 7,7,0x20 ++ ++ .long 0x10E7CD48 ++ .long 0x118CCD48 ++ .long 0x11ADCD48 ++ .long 0x11CECD48 ++ .long 0x11EFCD48 ++ lvx 25,3,7 ++ bdnz _aesp8_xts_dec5x ++ ++ subi 0,31,1 ++ .long 0x10E7C548 ++ .long 0x118CC548 ++ .long 0x11ADC548 ++ .long 0x11CEC548 ++ .long 0x11EFC548 ++ ++ andi. 0,0,16 ++ cmpwi 31,0 ++ .long 0x10E7CD48 ++ .long 0x118CCD48 ++ .long 0x11ADCD48 ++ .long 0x11CECD48 ++ .long 0x11EFCD48 ++ vxor 17,17,31 ++ ++ sub 10,10,0 ++ .long 0x10E7D548 ++ .long 0x118CD548 ++ .long 0x11ADD548 ++ .long 0x11CED548 ++ .long 0x11EFD548 ++ vxor 1,18,31 ++ ++ .long 0x10E7DD48 ++ .long 0x7C005699 ++ .long 0x118CDD48 ++ .long 0x11ADDD48 ++ .long 0x11CEDD48 ++ .long 0x11EFDD48 ++ vxor 2,19,31 ++ ++ addi 7,1,79 ++ .long 0x10E7E548 ++ .long 0x118CE548 ++ .long 0x11ADE548 ++ .long 0x11CEE548 ++ .long 0x11EFE548 ++ lvx 24,0,7 ++ vxor 3,20,31 ++ ++ .long 0x10E7ED48 ++ vperm 0,0,0,6 ++ .long 0x118CED48 ++ .long 0x11ADED48 ++ .long 0x11CEED48 ++ .long 0x11EFED48 ++ lvx 25,3,7 ++ vxor 4,21,31 ++ ++ .long 0x10E7F548 ++ .long 0x118CF548 ++ .long 0x11ADF548 ++ .long 0x11CEF548 ++ .long 0x11EFF548 ++ ++ .long 0x10E78D49 ++ .long 0x118C0D49 ++ .long 0x11AD1549 ++ .long 0x11CE1D49 ++ .long 0x11EF2549 ++ mtctr 9 ++ blr ++.long 0 ++.byte 0,12,0x14,0,0,0,0,0 ++#endif // !OPENSSL_NO_ASM && __powerpc64__ && __ELF__ ++#if 
defined(__ELF__) ++// See https://www.airs.com/blog/archives/518. ++.section .note.GNU-stack,"",%progbits ++#endif +Index: chromium-120.0.6099.71/third_party/boringssl/linux-ppc64le/crypto/fipsmodule/ghashp8-ppc-linux.S +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/boringssl/linux-ppc64le/crypto/fipsmodule/ghashp8-ppc-linux.S +@@ -0,0 +1,590 @@ ++// This file is generated from a similarly-named Perl script in the BoringSSL ++// source tree. Do not edit by hand. ++ ++#if defined(__has_feature) ++#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) ++#define OPENSSL_NO_ASM ++#endif ++#endif ++ ++#if !defined(OPENSSL_NO_ASM) && defined(__powerpc64__) && defined(__ELF__) ++.machine "any" ++ ++.abiversion 2 ++.text ++ ++.globl gcm_init_p8 ++.type gcm_init_p8,@function ++.align 5 ++gcm_init_p8: ++.localentry gcm_init_p8,0 ++ ++ li 0,-4096 ++ li 8,0x10 ++ li 12,-1 ++ li 9,0x20 ++ or 0,0,0 ++ li 10,0x30 ++ .long 0x7D202699 ++ ++ vspltisb 8,-16 ++ vspltisb 5,1 ++ vaddubm 8,8,8 ++ vxor 4,4,4 ++ vor 8,8,5 ++ vsldoi 8,8,4,15 ++ vsldoi 6,4,5,1 ++ vaddubm 8,8,8 ++ vspltisb 7,7 ++ vor 8,8,6 ++ vspltb 6,9,0 ++ vsl 9,9,5 ++ vsrab 6,6,7 ++ vand 6,6,8 ++ vxor 3,9,6 ++ ++ vsldoi 9,3,3,8 ++ vsldoi 8,4,8,8 ++ vsldoi 11,4,9,8 ++ vsldoi 10,9,4,8 ++ ++ .long 0x7D001F99 ++ .long 0x7D681F99 ++ li 8,0x40 ++ .long 0x7D291F99 ++ li 9,0x50 ++ .long 0x7D4A1F99 ++ li 10,0x60 ++ ++ .long 0x10035CC8 ++ .long 0x10234CC8 ++ .long 0x104354C8 ++ ++ .long 0x10E044C8 ++ ++ vsldoi 5,1,4,8 ++ vsldoi 6,4,1,8 ++ vxor 0,0,5 ++ vxor 2,2,6 ++ ++ vsldoi 0,0,0,8 ++ vxor 0,0,7 ++ ++ vsldoi 6,0,0,8 ++ .long 0x100044C8 ++ vxor 6,6,2 ++ vxor 16,0,6 ++ ++ vsldoi 17,16,16,8 ++ vsldoi 19,4,17,8 ++ vsldoi 18,17,4,8 ++ ++ .long 0x7E681F99 ++ li 8,0x70 ++ .long 0x7E291F99 ++ li 9,0x80 ++ .long 0x7E4A1F99 ++ li 10,0x90 ++ .long 0x10039CC8 ++ .long 0x11B09CC8 ++ .long 0x10238CC8 ++ .long 0x11D08CC8 ++ .long 0x104394C8 ++ .long 0x11F094C8 
++ ++ .long 0x10E044C8 ++ .long 0x114D44C8 ++ ++ vsldoi 5,1,4,8 ++ vsldoi 6,4,1,8 ++ vsldoi 11,14,4,8 ++ vsldoi 9,4,14,8 ++ vxor 0,0,5 ++ vxor 2,2,6 ++ vxor 13,13,11 ++ vxor 15,15,9 ++ ++ vsldoi 0,0,0,8 ++ vsldoi 13,13,13,8 ++ vxor 0,0,7 ++ vxor 13,13,10 ++ ++ vsldoi 6,0,0,8 ++ vsldoi 9,13,13,8 ++ .long 0x100044C8 ++ .long 0x11AD44C8 ++ vxor 6,6,2 ++ vxor 9,9,15 ++ vxor 0,0,6 ++ vxor 13,13,9 ++ ++ vsldoi 9,0,0,8 ++ vsldoi 17,13,13,8 ++ vsldoi 11,4,9,8 ++ vsldoi 10,9,4,8 ++ vsldoi 19,4,17,8 ++ vsldoi 18,17,4,8 ++ ++ .long 0x7D681F99 ++ li 8,0xa0 ++ .long 0x7D291F99 ++ li 9,0xb0 ++ .long 0x7D4A1F99 ++ li 10,0xc0 ++ .long 0x7E681F99 ++ .long 0x7E291F99 ++ .long 0x7E4A1F99 ++ ++ or 12,12,12 ++ blr ++.long 0 ++.byte 0,12,0x14,0,0,0,2,0 ++.long 0 ++.size gcm_init_p8,.-gcm_init_p8 ++.globl gcm_gmult_p8 ++.type gcm_gmult_p8,@function ++.align 5 ++gcm_gmult_p8: ++.localentry gcm_gmult_p8,0 ++ ++ lis 0,0xfff8 ++ li 8,0x10 ++ li 12,-1 ++ li 9,0x20 ++ or 0,0,0 ++ li 10,0x30 ++ .long 0x7C601E99 ++ ++ .long 0x7D682699 ++ lvsl 12,0,0 ++ .long 0x7D292699 ++ vspltisb 5,0x07 ++ .long 0x7D4A2699 ++ vxor 12,12,5 ++ .long 0x7D002699 ++ vperm 3,3,3,12 ++ vxor 4,4,4 ++ ++ .long 0x10035CC8 ++ .long 0x10234CC8 ++ .long 0x104354C8 ++ ++ .long 0x10E044C8 ++ ++ vsldoi 5,1,4,8 ++ vsldoi 6,4,1,8 ++ vxor 0,0,5 ++ vxor 2,2,6 ++ ++ vsldoi 0,0,0,8 ++ vxor 0,0,7 ++ ++ vsldoi 6,0,0,8 ++ .long 0x100044C8 ++ vxor 6,6,2 ++ vxor 0,0,6 ++ ++ vperm 0,0,0,12 ++ .long 0x7C001F99 ++ ++ or 12,12,12 ++ blr ++.long 0 ++.byte 0,12,0x14,0,0,0,2,0 ++.long 0 ++.size gcm_gmult_p8,.-gcm_gmult_p8 ++ ++.globl gcm_ghash_p8 ++.type gcm_ghash_p8,@function ++.align 5 ++gcm_ghash_p8: ++.localentry gcm_ghash_p8,0 ++ ++ li 0,-4096 ++ li 8,0x10 ++ li 12,-1 ++ li 9,0x20 ++ or 0,0,0 ++ li 10,0x30 ++ .long 0x7C001E99 ++ ++ .long 0x7D682699 ++ li 8,0x40 ++ lvsl 12,0,0 ++ .long 0x7D292699 ++ li 9,0x50 ++ vspltisb 5,0x07 ++ .long 0x7D4A2699 ++ li 10,0x60 ++ vxor 12,12,5 ++ .long 0x7D002699 ++ vperm 0,0,0,12 ++ vxor 4,4,4 ++ ++ cmpldi 
6,64 ++ bge .Lgcm_ghash_p8_4x ++ ++ .long 0x7C602E99 ++ addi 5,5,16 ++ subic. 6,6,16 ++ vperm 3,3,3,12 ++ vxor 3,3,0 ++ beq .Lshort ++ ++ .long 0x7E682699 ++ li 8,16 ++ .long 0x7E292699 ++ add 9,5,6 ++ .long 0x7E4A2699 ++ ++ ++.align 5 ++.Loop_2x: ++ .long 0x7E002E99 ++ vperm 16,16,16,12 ++ ++ subic 6,6,32 ++ .long 0x10039CC8 ++ .long 0x11B05CC8 ++ subfe 0,0,0 ++ .long 0x10238CC8 ++ .long 0x11D04CC8 ++ and 0,0,6 ++ .long 0x104394C8 ++ .long 0x11F054C8 ++ add 5,5,0 ++ ++ vxor 0,0,13 ++ vxor 1,1,14 ++ ++ .long 0x10E044C8 ++ ++ vsldoi 5,1,4,8 ++ vsldoi 6,4,1,8 ++ vxor 2,2,15 ++ vxor 0,0,5 ++ vxor 2,2,6 ++ ++ vsldoi 0,0,0,8 ++ vxor 0,0,7 ++ .long 0x7C682E99 ++ addi 5,5,32 ++ ++ vsldoi 6,0,0,8 ++ .long 0x100044C8 ++ vperm 3,3,3,12 ++ vxor 6,6,2 ++ vxor 3,3,6 ++ vxor 3,3,0 ++ cmpld 9,5 ++ bgt .Loop_2x ++ ++ cmplwi 6,0 ++ bne .Leven ++ ++.Lshort: ++ .long 0x10035CC8 ++ .long 0x10234CC8 ++ .long 0x104354C8 ++ ++ .long 0x10E044C8 ++ ++ vsldoi 5,1,4,8 ++ vsldoi 6,4,1,8 ++ vxor 0,0,5 ++ vxor 2,2,6 ++ ++ vsldoi 0,0,0,8 ++ vxor 0,0,7 ++ ++ vsldoi 6,0,0,8 ++ .long 0x100044C8 ++ vxor 6,6,2 ++ ++.Leven: ++ vxor 0,0,6 ++ vperm 0,0,0,12 ++ .long 0x7C001F99 ++ ++ or 12,12,12 ++ blr ++.long 0 ++.byte 0,12,0x14,0,0,0,4,0 ++.long 0 ++.align 5 ++.gcm_ghash_p8_4x: ++.Lgcm_ghash_p8_4x: ++ stdu 1,-256(1) ++ li 10,63 ++ li 11,79 ++ stvx 20,10,1 ++ addi 10,10,32 ++ stvx 21,11,1 ++ addi 11,11,32 ++ stvx 22,10,1 ++ addi 10,10,32 ++ stvx 23,11,1 ++ addi 11,11,32 ++ stvx 24,10,1 ++ addi 10,10,32 ++ stvx 25,11,1 ++ addi 11,11,32 ++ stvx 26,10,1 ++ addi 10,10,32 ++ stvx 27,11,1 ++ addi 11,11,32 ++ stvx 28,10,1 ++ addi 10,10,32 ++ stvx 29,11,1 ++ addi 11,11,32 ++ stvx 30,10,1 ++ li 10,0x60 ++ stvx 31,11,1 ++ li 0,-1 ++ stw 12,252(1) ++ or 0,0,0 ++ ++ lvsl 5,0,8 ++ ++ li 8,0x70 ++ .long 0x7E292699 ++ li 9,0x80 ++ vspltisb 6,8 ++ ++ li 10,0x90 ++ .long 0x7EE82699 ++ li 8,0xa0 ++ .long 0x7F092699 ++ li 9,0xb0 ++ .long 0x7F2A2699 ++ li 10,0xc0 ++ .long 0x7FA82699 ++ li 8,0x10 ++ .long 0x7FC92699 ++ li 
9,0x20 ++ .long 0x7FEA2699 ++ li 10,0x30 ++ ++ vsldoi 7,4,6,8 ++ vaddubm 18,5,7 ++ vaddubm 19,6,18 ++ ++ srdi 6,6,4 ++ ++ .long 0x7C602E99 ++ .long 0x7E082E99 ++ subic. 6,6,8 ++ .long 0x7EC92E99 ++ .long 0x7F8A2E99 ++ addi 5,5,0x40 ++ vperm 3,3,3,12 ++ vperm 16,16,16,12 ++ vperm 22,22,22,12 ++ vperm 28,28,28,12 ++ ++ vxor 2,3,0 ++ ++ .long 0x11B0BCC8 ++ .long 0x11D0C4C8 ++ .long 0x11F0CCC8 ++ ++ vperm 11,17,9,18 ++ vperm 5,22,28,19 ++ vperm 10,17,9,19 ++ vperm 6,22,28,18 ++ .long 0x12B68CC8 ++ .long 0x12855CC8 ++ .long 0x137C4CC8 ++ .long 0x134654C8 ++ ++ vxor 21,21,14 ++ vxor 20,20,13 ++ vxor 27,27,21 ++ vxor 26,26,15 ++ ++ blt .Ltail_4x ++ ++.Loop_4x: ++ .long 0x7C602E99 ++ .long 0x7E082E99 ++ subic. 6,6,4 ++ .long 0x7EC92E99 ++ .long 0x7F8A2E99 ++ addi 5,5,0x40 ++ vperm 16,16,16,12 ++ vperm 22,22,22,12 ++ vperm 28,28,28,12 ++ vperm 3,3,3,12 ++ ++ .long 0x1002ECC8 ++ .long 0x1022F4C8 ++ .long 0x1042FCC8 ++ .long 0x11B0BCC8 ++ .long 0x11D0C4C8 ++ .long 0x11F0CCC8 ++ ++ vxor 0,0,20 ++ vxor 1,1,27 ++ vxor 2,2,26 ++ vperm 5,22,28,19 ++ vperm 6,22,28,18 ++ ++ .long 0x10E044C8 ++ .long 0x12855CC8 ++ .long 0x134654C8 ++ ++ vsldoi 5,1,4,8 ++ vsldoi 6,4,1,8 ++ vxor 0,0,5 ++ vxor 2,2,6 ++ ++ vsldoi 0,0,0,8 ++ vxor 0,0,7 ++ ++ vsldoi 6,0,0,8 ++ .long 0x12B68CC8 ++ .long 0x137C4CC8 ++ .long 0x100044C8 ++ ++ vxor 20,20,13 ++ vxor 26,26,15 ++ vxor 2,2,3 ++ vxor 21,21,14 ++ vxor 2,2,6 ++ vxor 27,27,21 ++ vxor 2,2,0 ++ bge .Loop_4x ++ ++.Ltail_4x: ++ .long 0x1002ECC8 ++ .long 0x1022F4C8 ++ .long 0x1042FCC8 ++ ++ vxor 0,0,20 ++ vxor 1,1,27 ++ ++ .long 0x10E044C8 ++ ++ vsldoi 5,1,4,8 ++ vsldoi 6,4,1,8 ++ vxor 2,2,26 ++ vxor 0,0,5 ++ vxor 2,2,6 ++ ++ vsldoi 0,0,0,8 ++ vxor 0,0,7 ++ ++ vsldoi 6,0,0,8 ++ .long 0x100044C8 ++ vxor 6,6,2 ++ vxor 0,0,6 ++ ++ addic. 
6,6,4 ++ beq .Ldone_4x ++ ++ .long 0x7C602E99 ++ cmpldi 6,2 ++ li 6,-4 ++ blt .Lone ++ .long 0x7E082E99 ++ beq .Ltwo ++ ++.Lthree: ++ .long 0x7EC92E99 ++ vperm 3,3,3,12 ++ vperm 16,16,16,12 ++ vperm 22,22,22,12 ++ ++ vxor 2,3,0 ++ vor 29,23,23 ++ vor 30, 24, 24 ++ vor 31,25,25 ++ ++ vperm 5,16,22,19 ++ vperm 6,16,22,18 ++ .long 0x12B08CC8 ++ .long 0x13764CC8 ++ .long 0x12855CC8 ++ .long 0x134654C8 ++ ++ vxor 27,27,21 ++ b .Ltail_4x ++ ++.align 4 ++.Ltwo: ++ vperm 3,3,3,12 ++ vperm 16,16,16,12 ++ ++ vxor 2,3,0 ++ vperm 5,4,16,19 ++ vperm 6,4,16,18 ++ ++ vsldoi 29,4,17,8 ++ vor 30, 17, 17 ++ vsldoi 31,17,4,8 ++ ++ .long 0x12855CC8 ++ .long 0x13704CC8 ++ .long 0x134654C8 ++ ++ b .Ltail_4x ++ ++.align 4 ++.Lone: ++ vperm 3,3,3,12 ++ ++ vsldoi 29,4,9,8 ++ vor 30, 9, 9 ++ vsldoi 31,9,4,8 ++ ++ vxor 2,3,0 ++ vxor 20,20,20 ++ vxor 27,27,27 ++ vxor 26,26,26 ++ ++ b .Ltail_4x ++ ++.Ldone_4x: ++ vperm 0,0,0,12 ++ .long 0x7C001F99 ++ ++ li 10,63 ++ li 11,79 ++ or 12,12,12 ++ lvx 20,10,1 ++ addi 10,10,32 ++ lvx 21,11,1 ++ addi 11,11,32 ++ lvx 22,10,1 ++ addi 10,10,32 ++ lvx 23,11,1 ++ addi 11,11,32 ++ lvx 24,10,1 ++ addi 10,10,32 ++ lvx 25,11,1 ++ addi 11,11,32 ++ lvx 26,10,1 ++ addi 10,10,32 ++ lvx 27,11,1 ++ addi 11,11,32 ++ lvx 28,10,1 ++ addi 10,10,32 ++ lvx 29,11,1 ++ addi 11,11,32 ++ lvx 30,10,1 ++ lvx 31,11,1 ++ addi 1,1,256 ++ blr ++.long 0 ++.byte 0,12,0x04,0,0x80,0,4,0 ++.long 0 ++.size gcm_ghash_p8,.-gcm_ghash_p8 ++ ++.byte 71,72,65,83,72,32,102,111,114,32,80,111,119,101,114,73,83,65,32,50,46,48,55,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 ++.align 2 ++.align 2 ++#endif // !OPENSSL_NO_ASM && __powerpc64__ && __ELF__ ++#if defined(__ELF__) ++// See https://www.airs.com/blog/archives/518. 
++.section .note.GNU-stack,"",%progbits ++#endif +Index: chromium-120.0.6099.71/third_party/boringssl/linux-ppc64le/crypto/test/trampoline-ppc-linux.S +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/boringssl/linux-ppc64le/crypto/test/trampoline-ppc-linux.S +@@ -0,0 +1,1413 @@ ++// This file is generated from a similarly-named Perl script in the BoringSSL ++// source tree. Do not edit by hand. ++ ++#if defined(__has_feature) ++#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) ++#define OPENSSL_NO_ASM ++#endif ++#endif ++ ++#if !defined(OPENSSL_NO_ASM) && defined(__powerpc64__) && defined(__ELF__) ++.machine "any" ++.abiversion 2 ++.text ++ ++ ++ ++ ++ ++ ++ ++.globl abi_test_trampoline ++.type abi_test_trampoline,@function ++.align 5 ++abi_test_trampoline: ++.localentry abi_test_trampoline,0 ++ ++ ++ mflr 0 ++ std 0, 16(1) ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ stdu 1, -528(1) ++ ++ mfcr 0 ++ std 0, 8(1) ++ std 2, 24(1) ++ std 4, 32(1) ++ li 11, 48 ++ stvx 20, 11, 1 ++ li 11, 64 ++ stvx 21, 11, 1 ++ li 11, 80 ++ stvx 22, 11, 1 ++ li 11, 96 ++ stvx 23, 11, 1 ++ li 11, 112 ++ stvx 24, 11, 1 ++ li 11, 128 ++ stvx 25, 11, 1 ++ li 11, 144 ++ stvx 26, 11, 1 ++ li 11, 160 ++ stvx 27, 11, 1 ++ li 11, 176 ++ stvx 28, 11, 1 ++ li 11, 192 ++ stvx 29, 11, 1 ++ li 11, 208 ++ stvx 30, 11, 1 ++ li 11, 224 ++ stvx 31, 11, 1 ++ std 14, 240(1) ++ std 15, 248(1) ++ std 16, 256(1) ++ std 17, 264(1) ++ std 18, 272(1) ++ std 19, 280(1) ++ std 20, 288(1) ++ std 21, 296(1) ++ std 22, 304(1) ++ std 23, 312(1) ++ std 24, 320(1) ++ std 25, 328(1) ++ std 26, 336(1) ++ std 27, 344(1) ++ std 28, 352(1) ++ std 29, 360(1) ++ std 30, 368(1) ++ std 31, 376(1) ++ stfd 14, 384(1) ++ stfd 15, 392(1) ++ stfd 16, 400(1) ++ stfd 17, 408(1) ++ stfd 18, 416(1) ++ stfd 19, 424(1) ++ stfd 20, 432(1) ++ stfd 21, 440(1) ++ stfd 22, 448(1) ++ stfd 23, 456(1) ++ stfd 24, 464(1) ++ stfd 25, 472(1) ++ 
stfd 26, 480(1) ++ stfd 27, 488(1) ++ stfd 28, 496(1) ++ stfd 29, 504(1) ++ stfd 30, 512(1) ++ stfd 31, 520(1) ++ li 11, 0 ++ lvx 20, 11, 4 ++ li 11, 16 ++ lvx 21, 11, 4 ++ li 11, 32 ++ lvx 22, 11, 4 ++ li 11, 48 ++ lvx 23, 11, 4 ++ li 11, 64 ++ lvx 24, 11, 4 ++ li 11, 80 ++ lvx 25, 11, 4 ++ li 11, 96 ++ lvx 26, 11, 4 ++ li 11, 112 ++ lvx 27, 11, 4 ++ li 11, 128 ++ lvx 28, 11, 4 ++ li 11, 144 ++ lvx 29, 11, 4 ++ li 11, 160 ++ lvx 30, 11, 4 ++ li 11, 176 ++ lvx 31, 11, 4 ++ ld 14, 192(4) ++ ld 15, 200(4) ++ ld 16, 208(4) ++ ld 17, 216(4) ++ ld 18, 224(4) ++ ld 19, 232(4) ++ ld 20, 240(4) ++ ld 21, 248(4) ++ ld 22, 256(4) ++ ld 23, 264(4) ++ ld 24, 272(4) ++ ld 25, 280(4) ++ ld 26, 288(4) ++ ld 27, 296(4) ++ ld 28, 304(4) ++ ld 29, 312(4) ++ ld 30, 320(4) ++ ld 31, 328(4) ++ lfd 14, 336(4) ++ lfd 15, 344(4) ++ lfd 16, 352(4) ++ lfd 17, 360(4) ++ lfd 18, 368(4) ++ lfd 19, 376(4) ++ lfd 20, 384(4) ++ lfd 21, 392(4) ++ lfd 22, 400(4) ++ lfd 23, 408(4) ++ lfd 24, 416(4) ++ lfd 25, 424(4) ++ lfd 26, 432(4) ++ lfd 27, 440(4) ++ lfd 28, 448(4) ++ lfd 29, 456(4) ++ lfd 30, 464(4) ++ lfd 31, 472(4) ++ ++ ld 0, 480(4) ++ mtcr 0 ++ ++ ++ addi 11, 5, -8 ++ mr 12, 3 ++ ++ ++ cmpdi 6, 0 ++ beq .Largs_done ++ mtctr 6 ++ ldu 3, 8(11) ++ bdz .Largs_done ++ ldu 4, 8(11) ++ bdz .Largs_done ++ ldu 5, 8(11) ++ bdz .Largs_done ++ ldu 6, 8(11) ++ bdz .Largs_done ++ ldu 7, 8(11) ++ bdz .Largs_done ++ ldu 8, 8(11) ++ bdz .Largs_done ++ ldu 9, 8(11) ++ bdz .Largs_done ++ ldu 10, 8(11) ++ ++.Largs_done: ++ li 2, 0 ++ mtctr 12 ++ bctrl ++ ld 2, 24(1) ++ ++ ld 4, 32(1) ++ li 11, 0 ++ stvx 20, 11, 4 ++ li 11, 16 ++ stvx 21, 11, 4 ++ li 11, 32 ++ stvx 22, 11, 4 ++ li 11, 48 ++ stvx 23, 11, 4 ++ li 11, 64 ++ stvx 24, 11, 4 ++ li 11, 80 ++ stvx 25, 11, 4 ++ li 11, 96 ++ stvx 26, 11, 4 ++ li 11, 112 ++ stvx 27, 11, 4 ++ li 11, 128 ++ stvx 28, 11, 4 ++ li 11, 144 ++ stvx 29, 11, 4 ++ li 11, 160 ++ stvx 30, 11, 4 ++ li 11, 176 ++ stvx 31, 11, 4 ++ std 14, 192(4) ++ std 15, 200(4) ++ std 16, 208(4) ++ 
std 17, 216(4) ++ std 18, 224(4) ++ std 19, 232(4) ++ std 20, 240(4) ++ std 21, 248(4) ++ std 22, 256(4) ++ std 23, 264(4) ++ std 24, 272(4) ++ std 25, 280(4) ++ std 26, 288(4) ++ std 27, 296(4) ++ std 28, 304(4) ++ std 29, 312(4) ++ std 30, 320(4) ++ std 31, 328(4) ++ stfd 14, 336(4) ++ stfd 15, 344(4) ++ stfd 16, 352(4) ++ stfd 17, 360(4) ++ stfd 18, 368(4) ++ stfd 19, 376(4) ++ stfd 20, 384(4) ++ stfd 21, 392(4) ++ stfd 22, 400(4) ++ stfd 23, 408(4) ++ stfd 24, 416(4) ++ stfd 25, 424(4) ++ stfd 26, 432(4) ++ stfd 27, 440(4) ++ stfd 28, 448(4) ++ stfd 29, 456(4) ++ stfd 30, 464(4) ++ stfd 31, 472(4) ++ li 11, 48 ++ lvx 20, 11, 1 ++ li 11, 64 ++ lvx 21, 11, 1 ++ li 11, 80 ++ lvx 22, 11, 1 ++ li 11, 96 ++ lvx 23, 11, 1 ++ li 11, 112 ++ lvx 24, 11, 1 ++ li 11, 128 ++ lvx 25, 11, 1 ++ li 11, 144 ++ lvx 26, 11, 1 ++ li 11, 160 ++ lvx 27, 11, 1 ++ li 11, 176 ++ lvx 28, 11, 1 ++ li 11, 192 ++ lvx 29, 11, 1 ++ li 11, 208 ++ lvx 30, 11, 1 ++ li 11, 224 ++ lvx 31, 11, 1 ++ ld 14, 240(1) ++ ld 15, 248(1) ++ ld 16, 256(1) ++ ld 17, 264(1) ++ ld 18, 272(1) ++ ld 19, 280(1) ++ ld 20, 288(1) ++ ld 21, 296(1) ++ ld 22, 304(1) ++ ld 23, 312(1) ++ ld 24, 320(1) ++ ld 25, 328(1) ++ ld 26, 336(1) ++ ld 27, 344(1) ++ ld 28, 352(1) ++ ld 29, 360(1) ++ ld 30, 368(1) ++ ld 31, 376(1) ++ lfd 14, 384(1) ++ lfd 15, 392(1) ++ lfd 16, 400(1) ++ lfd 17, 408(1) ++ lfd 18, 416(1) ++ lfd 19, 424(1) ++ lfd 20, 432(1) ++ lfd 21, 440(1) ++ lfd 22, 448(1) ++ lfd 23, 456(1) ++ lfd 24, 464(1) ++ lfd 25, 472(1) ++ lfd 26, 480(1) ++ lfd 27, 488(1) ++ lfd 28, 496(1) ++ lfd 29, 504(1) ++ lfd 30, 512(1) ++ lfd 31, 520(1) ++ mfcr 0 ++ std 0, 480(4) ++ ld 0, 8(1) ++ mtcrf 0b00111000, 0 ++ addi 1, 1, 528 ++ ld 0, 16(1) ++ mtlr 0 ++ blr ++.size abi_test_trampoline,.-abi_test_trampoline ++.globl abi_test_clobber_r0 ++.type abi_test_clobber_r0,@function ++.align 5 ++abi_test_clobber_r0: ++.localentry abi_test_clobber_r0,0 ++ ++ li 0, 0 ++ blr ++.size abi_test_clobber_r0,.-abi_test_clobber_r0 ++.globl 
abi_test_clobber_r2 ++.type abi_test_clobber_r2,@function ++.align 5 ++abi_test_clobber_r2: ++.localentry abi_test_clobber_r2,0 ++ ++ li 2, 0 ++ blr ++.size abi_test_clobber_r2,.-abi_test_clobber_r2 ++.globl abi_test_clobber_r3 ++.type abi_test_clobber_r3,@function ++.align 5 ++abi_test_clobber_r3: ++.localentry abi_test_clobber_r3,0 ++ ++ li 3, 0 ++ blr ++.size abi_test_clobber_r3,.-abi_test_clobber_r3 ++.globl abi_test_clobber_r4 ++.type abi_test_clobber_r4,@function ++.align 5 ++abi_test_clobber_r4: ++.localentry abi_test_clobber_r4,0 ++ ++ li 4, 0 ++ blr ++.size abi_test_clobber_r4,.-abi_test_clobber_r4 ++.globl abi_test_clobber_r5 ++.type abi_test_clobber_r5,@function ++.align 5 ++abi_test_clobber_r5: ++.localentry abi_test_clobber_r5,0 ++ ++ li 5, 0 ++ blr ++.size abi_test_clobber_r5,.-abi_test_clobber_r5 ++.globl abi_test_clobber_r6 ++.type abi_test_clobber_r6,@function ++.align 5 ++abi_test_clobber_r6: ++.localentry abi_test_clobber_r6,0 ++ ++ li 6, 0 ++ blr ++.size abi_test_clobber_r6,.-abi_test_clobber_r6 ++.globl abi_test_clobber_r7 ++.type abi_test_clobber_r7,@function ++.align 5 ++abi_test_clobber_r7: ++.localentry abi_test_clobber_r7,0 ++ ++ li 7, 0 ++ blr ++.size abi_test_clobber_r7,.-abi_test_clobber_r7 ++.globl abi_test_clobber_r8 ++.type abi_test_clobber_r8,@function ++.align 5 ++abi_test_clobber_r8: ++.localentry abi_test_clobber_r8,0 ++ ++ li 8, 0 ++ blr ++.size abi_test_clobber_r8,.-abi_test_clobber_r8 ++.globl abi_test_clobber_r9 ++.type abi_test_clobber_r9,@function ++.align 5 ++abi_test_clobber_r9: ++.localentry abi_test_clobber_r9,0 ++ ++ li 9, 0 ++ blr ++.size abi_test_clobber_r9,.-abi_test_clobber_r9 ++.globl abi_test_clobber_r10 ++.type abi_test_clobber_r10,@function ++.align 5 ++abi_test_clobber_r10: ++.localentry abi_test_clobber_r10,0 ++ ++ li 10, 0 ++ blr ++.size abi_test_clobber_r10,.-abi_test_clobber_r10 ++.globl abi_test_clobber_r11 ++.type abi_test_clobber_r11,@function ++.align 5 ++abi_test_clobber_r11: ++.localentry 
abi_test_clobber_r11,0 ++ ++ li 11, 0 ++ blr ++.size abi_test_clobber_r11,.-abi_test_clobber_r11 ++.globl abi_test_clobber_r12 ++.type abi_test_clobber_r12,@function ++.align 5 ++abi_test_clobber_r12: ++.localentry abi_test_clobber_r12,0 ++ ++ li 12, 0 ++ blr ++.size abi_test_clobber_r12,.-abi_test_clobber_r12 ++.globl abi_test_clobber_r14 ++.type abi_test_clobber_r14,@function ++.align 5 ++abi_test_clobber_r14: ++.localentry abi_test_clobber_r14,0 ++ ++ li 14, 0 ++ blr ++.size abi_test_clobber_r14,.-abi_test_clobber_r14 ++.globl abi_test_clobber_r15 ++.type abi_test_clobber_r15,@function ++.align 5 ++abi_test_clobber_r15: ++.localentry abi_test_clobber_r15,0 ++ ++ li 15, 0 ++ blr ++.size abi_test_clobber_r15,.-abi_test_clobber_r15 ++.globl abi_test_clobber_r16 ++.type abi_test_clobber_r16,@function ++.align 5 ++abi_test_clobber_r16: ++.localentry abi_test_clobber_r16,0 ++ ++ li 16, 0 ++ blr ++.size abi_test_clobber_r16,.-abi_test_clobber_r16 ++.globl abi_test_clobber_r17 ++.type abi_test_clobber_r17,@function ++.align 5 ++abi_test_clobber_r17: ++.localentry abi_test_clobber_r17,0 ++ ++ li 17, 0 ++ blr ++.size abi_test_clobber_r17,.-abi_test_clobber_r17 ++.globl abi_test_clobber_r18 ++.type abi_test_clobber_r18,@function ++.align 5 ++abi_test_clobber_r18: ++.localentry abi_test_clobber_r18,0 ++ ++ li 18, 0 ++ blr ++.size abi_test_clobber_r18,.-abi_test_clobber_r18 ++.globl abi_test_clobber_r19 ++.type abi_test_clobber_r19,@function ++.align 5 ++abi_test_clobber_r19: ++.localentry abi_test_clobber_r19,0 ++ ++ li 19, 0 ++ blr ++.size abi_test_clobber_r19,.-abi_test_clobber_r19 ++.globl abi_test_clobber_r20 ++.type abi_test_clobber_r20,@function ++.align 5 ++abi_test_clobber_r20: ++.localentry abi_test_clobber_r20,0 ++ ++ li 20, 0 ++ blr ++.size abi_test_clobber_r20,.-abi_test_clobber_r20 ++.globl abi_test_clobber_r21 ++.type abi_test_clobber_r21,@function ++.align 5 ++abi_test_clobber_r21: ++.localentry abi_test_clobber_r21,0 ++ ++ li 21, 0 ++ blr ++.size 
abi_test_clobber_r21,.-abi_test_clobber_r21 ++.globl abi_test_clobber_r22 ++.type abi_test_clobber_r22,@function ++.align 5 ++abi_test_clobber_r22: ++.localentry abi_test_clobber_r22,0 ++ ++ li 22, 0 ++ blr ++.size abi_test_clobber_r22,.-abi_test_clobber_r22 ++.globl abi_test_clobber_r23 ++.type abi_test_clobber_r23,@function ++.align 5 ++abi_test_clobber_r23: ++.localentry abi_test_clobber_r23,0 ++ ++ li 23, 0 ++ blr ++.size abi_test_clobber_r23,.-abi_test_clobber_r23 ++.globl abi_test_clobber_r24 ++.type abi_test_clobber_r24,@function ++.align 5 ++abi_test_clobber_r24: ++.localentry abi_test_clobber_r24,0 ++ ++ li 24, 0 ++ blr ++.size abi_test_clobber_r24,.-abi_test_clobber_r24 ++.globl abi_test_clobber_r25 ++.type abi_test_clobber_r25,@function ++.align 5 ++abi_test_clobber_r25: ++.localentry abi_test_clobber_r25,0 ++ ++ li 25, 0 ++ blr ++.size abi_test_clobber_r25,.-abi_test_clobber_r25 ++.globl abi_test_clobber_r26 ++.type abi_test_clobber_r26,@function ++.align 5 ++abi_test_clobber_r26: ++.localentry abi_test_clobber_r26,0 ++ ++ li 26, 0 ++ blr ++.size abi_test_clobber_r26,.-abi_test_clobber_r26 ++.globl abi_test_clobber_r27 ++.type abi_test_clobber_r27,@function ++.align 5 ++abi_test_clobber_r27: ++.localentry abi_test_clobber_r27,0 ++ ++ li 27, 0 ++ blr ++.size abi_test_clobber_r27,.-abi_test_clobber_r27 ++.globl abi_test_clobber_r28 ++.type abi_test_clobber_r28,@function ++.align 5 ++abi_test_clobber_r28: ++.localentry abi_test_clobber_r28,0 ++ ++ li 28, 0 ++ blr ++.size abi_test_clobber_r28,.-abi_test_clobber_r28 ++.globl abi_test_clobber_r29 ++.type abi_test_clobber_r29,@function ++.align 5 ++abi_test_clobber_r29: ++.localentry abi_test_clobber_r29,0 ++ ++ li 29, 0 ++ blr ++.size abi_test_clobber_r29,.-abi_test_clobber_r29 ++.globl abi_test_clobber_r30 ++.type abi_test_clobber_r30,@function ++.align 5 ++abi_test_clobber_r30: ++.localentry abi_test_clobber_r30,0 ++ ++ li 30, 0 ++ blr ++.size abi_test_clobber_r30,.-abi_test_clobber_r30 ++.globl 
abi_test_clobber_r31 ++.type abi_test_clobber_r31,@function ++.align 5 ++abi_test_clobber_r31: ++.localentry abi_test_clobber_r31,0 ++ ++ li 31, 0 ++ blr ++.size abi_test_clobber_r31,.-abi_test_clobber_r31 ++.globl abi_test_clobber_f0 ++.type abi_test_clobber_f0,@function ++.align 4 ++abi_test_clobber_f0: ++.localentry abi_test_clobber_f0,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 0, -8(1) ++ blr ++.size abi_test_clobber_f0,.-abi_test_clobber_f0 ++.globl abi_test_clobber_f1 ++.type abi_test_clobber_f1,@function ++.align 4 ++abi_test_clobber_f1: ++.localentry abi_test_clobber_f1,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 1, -8(1) ++ blr ++.size abi_test_clobber_f1,.-abi_test_clobber_f1 ++.globl abi_test_clobber_f2 ++.type abi_test_clobber_f2,@function ++.align 4 ++abi_test_clobber_f2: ++.localentry abi_test_clobber_f2,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 2, -8(1) ++ blr ++.size abi_test_clobber_f2,.-abi_test_clobber_f2 ++.globl abi_test_clobber_f3 ++.type abi_test_clobber_f3,@function ++.align 4 ++abi_test_clobber_f3: ++.localentry abi_test_clobber_f3,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 3, -8(1) ++ blr ++.size abi_test_clobber_f3,.-abi_test_clobber_f3 ++.globl abi_test_clobber_f4 ++.type abi_test_clobber_f4,@function ++.align 4 ++abi_test_clobber_f4: ++.localentry abi_test_clobber_f4,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 4, -8(1) ++ blr ++.size abi_test_clobber_f4,.-abi_test_clobber_f4 ++.globl abi_test_clobber_f5 ++.type abi_test_clobber_f5,@function ++.align 4 ++abi_test_clobber_f5: ++.localentry abi_test_clobber_f5,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 5, -8(1) ++ blr ++.size abi_test_clobber_f5,.-abi_test_clobber_f5 ++.globl abi_test_clobber_f6 ++.type abi_test_clobber_f6,@function ++.align 4 ++abi_test_clobber_f6: ++.localentry abi_test_clobber_f6,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 6, -8(1) ++ blr ++.size abi_test_clobber_f6,.-abi_test_clobber_f6 ++.globl abi_test_clobber_f7 ++.type abi_test_clobber_f7,@function ++.align 4 
++abi_test_clobber_f7: ++.localentry abi_test_clobber_f7,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 7, -8(1) ++ blr ++.size abi_test_clobber_f7,.-abi_test_clobber_f7 ++.globl abi_test_clobber_f8 ++.type abi_test_clobber_f8,@function ++.align 4 ++abi_test_clobber_f8: ++.localentry abi_test_clobber_f8,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 8, -8(1) ++ blr ++.size abi_test_clobber_f8,.-abi_test_clobber_f8 ++.globl abi_test_clobber_f9 ++.type abi_test_clobber_f9,@function ++.align 4 ++abi_test_clobber_f9: ++.localentry abi_test_clobber_f9,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 9, -8(1) ++ blr ++.size abi_test_clobber_f9,.-abi_test_clobber_f9 ++.globl abi_test_clobber_f10 ++.type abi_test_clobber_f10,@function ++.align 4 ++abi_test_clobber_f10: ++.localentry abi_test_clobber_f10,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 10, -8(1) ++ blr ++.size abi_test_clobber_f10,.-abi_test_clobber_f10 ++.globl abi_test_clobber_f11 ++.type abi_test_clobber_f11,@function ++.align 4 ++abi_test_clobber_f11: ++.localentry abi_test_clobber_f11,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 11, -8(1) ++ blr ++.size abi_test_clobber_f11,.-abi_test_clobber_f11 ++.globl abi_test_clobber_f12 ++.type abi_test_clobber_f12,@function ++.align 4 ++abi_test_clobber_f12: ++.localentry abi_test_clobber_f12,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 12, -8(1) ++ blr ++.size abi_test_clobber_f12,.-abi_test_clobber_f12 ++.globl abi_test_clobber_f13 ++.type abi_test_clobber_f13,@function ++.align 4 ++abi_test_clobber_f13: ++.localentry abi_test_clobber_f13,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 13, -8(1) ++ blr ++.size abi_test_clobber_f13,.-abi_test_clobber_f13 ++.globl abi_test_clobber_f14 ++.type abi_test_clobber_f14,@function ++.align 4 ++abi_test_clobber_f14: ++.localentry abi_test_clobber_f14,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 14, -8(1) ++ blr ++.size abi_test_clobber_f14,.-abi_test_clobber_f14 ++.globl abi_test_clobber_f15 ++.type abi_test_clobber_f15,@function ++.align 4 
++abi_test_clobber_f15: ++.localentry abi_test_clobber_f15,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 15, -8(1) ++ blr ++.size abi_test_clobber_f15,.-abi_test_clobber_f15 ++.globl abi_test_clobber_f16 ++.type abi_test_clobber_f16,@function ++.align 4 ++abi_test_clobber_f16: ++.localentry abi_test_clobber_f16,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 16, -8(1) ++ blr ++.size abi_test_clobber_f16,.-abi_test_clobber_f16 ++.globl abi_test_clobber_f17 ++.type abi_test_clobber_f17,@function ++.align 4 ++abi_test_clobber_f17: ++.localentry abi_test_clobber_f17,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 17, -8(1) ++ blr ++.size abi_test_clobber_f17,.-abi_test_clobber_f17 ++.globl abi_test_clobber_f18 ++.type abi_test_clobber_f18,@function ++.align 4 ++abi_test_clobber_f18: ++.localentry abi_test_clobber_f18,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 18, -8(1) ++ blr ++.size abi_test_clobber_f18,.-abi_test_clobber_f18 ++.globl abi_test_clobber_f19 ++.type abi_test_clobber_f19,@function ++.align 4 ++abi_test_clobber_f19: ++.localentry abi_test_clobber_f19,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 19, -8(1) ++ blr ++.size abi_test_clobber_f19,.-abi_test_clobber_f19 ++.globl abi_test_clobber_f20 ++.type abi_test_clobber_f20,@function ++.align 4 ++abi_test_clobber_f20: ++.localentry abi_test_clobber_f20,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 20, -8(1) ++ blr ++.size abi_test_clobber_f20,.-abi_test_clobber_f20 ++.globl abi_test_clobber_f21 ++.type abi_test_clobber_f21,@function ++.align 4 ++abi_test_clobber_f21: ++.localentry abi_test_clobber_f21,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 21, -8(1) ++ blr ++.size abi_test_clobber_f21,.-abi_test_clobber_f21 ++.globl abi_test_clobber_f22 ++.type abi_test_clobber_f22,@function ++.align 4 ++abi_test_clobber_f22: ++.localentry abi_test_clobber_f22,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 22, -8(1) ++ blr ++.size abi_test_clobber_f22,.-abi_test_clobber_f22 ++.globl abi_test_clobber_f23 ++.type abi_test_clobber_f23,@function ++.align 4 
++abi_test_clobber_f23: ++.localentry abi_test_clobber_f23,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 23, -8(1) ++ blr ++.size abi_test_clobber_f23,.-abi_test_clobber_f23 ++.globl abi_test_clobber_f24 ++.type abi_test_clobber_f24,@function ++.align 4 ++abi_test_clobber_f24: ++.localentry abi_test_clobber_f24,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 24, -8(1) ++ blr ++.size abi_test_clobber_f24,.-abi_test_clobber_f24 ++.globl abi_test_clobber_f25 ++.type abi_test_clobber_f25,@function ++.align 4 ++abi_test_clobber_f25: ++.localentry abi_test_clobber_f25,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 25, -8(1) ++ blr ++.size abi_test_clobber_f25,.-abi_test_clobber_f25 ++.globl abi_test_clobber_f26 ++.type abi_test_clobber_f26,@function ++.align 4 ++abi_test_clobber_f26: ++.localentry abi_test_clobber_f26,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 26, -8(1) ++ blr ++.size abi_test_clobber_f26,.-abi_test_clobber_f26 ++.globl abi_test_clobber_f27 ++.type abi_test_clobber_f27,@function ++.align 4 ++abi_test_clobber_f27: ++.localentry abi_test_clobber_f27,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 27, -8(1) ++ blr ++.size abi_test_clobber_f27,.-abi_test_clobber_f27 ++.globl abi_test_clobber_f28 ++.type abi_test_clobber_f28,@function ++.align 4 ++abi_test_clobber_f28: ++.localentry abi_test_clobber_f28,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 28, -8(1) ++ blr ++.size abi_test_clobber_f28,.-abi_test_clobber_f28 ++.globl abi_test_clobber_f29 ++.type abi_test_clobber_f29,@function ++.align 4 ++abi_test_clobber_f29: ++.localentry abi_test_clobber_f29,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 29, -8(1) ++ blr ++.size abi_test_clobber_f29,.-abi_test_clobber_f29 ++.globl abi_test_clobber_f30 ++.type abi_test_clobber_f30,@function ++.align 4 ++abi_test_clobber_f30: ++.localentry abi_test_clobber_f30,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 30, -8(1) ++ blr ++.size abi_test_clobber_f30,.-abi_test_clobber_f30 ++.globl abi_test_clobber_f31 ++.type abi_test_clobber_f31,@function ++.align 4 
++abi_test_clobber_f31: ++.localentry abi_test_clobber_f31,0 ++ ++ li 0, 0 ++ ++ std 0, -8(1) ++ lfd 31, -8(1) ++ blr ++.size abi_test_clobber_f31,.-abi_test_clobber_f31 ++.globl abi_test_clobber_v0 ++.type abi_test_clobber_v0,@function ++.align 4 ++abi_test_clobber_v0: ++.localentry abi_test_clobber_v0,0 ++ ++ vxor 0, 0, 0 ++ blr ++.size abi_test_clobber_v0,.-abi_test_clobber_v0 ++.globl abi_test_clobber_v1 ++.type abi_test_clobber_v1,@function ++.align 4 ++abi_test_clobber_v1: ++.localentry abi_test_clobber_v1,0 ++ ++ vxor 1, 1, 1 ++ blr ++.size abi_test_clobber_v1,.-abi_test_clobber_v1 ++.globl abi_test_clobber_v2 ++.type abi_test_clobber_v2,@function ++.align 4 ++abi_test_clobber_v2: ++.localentry abi_test_clobber_v2,0 ++ ++ vxor 2, 2, 2 ++ blr ++.size abi_test_clobber_v2,.-abi_test_clobber_v2 ++.globl abi_test_clobber_v3 ++.type abi_test_clobber_v3,@function ++.align 4 ++abi_test_clobber_v3: ++.localentry abi_test_clobber_v3,0 ++ ++ vxor 3, 3, 3 ++ blr ++.size abi_test_clobber_v3,.-abi_test_clobber_v3 ++.globl abi_test_clobber_v4 ++.type abi_test_clobber_v4,@function ++.align 4 ++abi_test_clobber_v4: ++.localentry abi_test_clobber_v4,0 ++ ++ vxor 4, 4, 4 ++ blr ++.size abi_test_clobber_v4,.-abi_test_clobber_v4 ++.globl abi_test_clobber_v5 ++.type abi_test_clobber_v5,@function ++.align 4 ++abi_test_clobber_v5: ++.localentry abi_test_clobber_v5,0 ++ ++ vxor 5, 5, 5 ++ blr ++.size abi_test_clobber_v5,.-abi_test_clobber_v5 ++.globl abi_test_clobber_v6 ++.type abi_test_clobber_v6,@function ++.align 4 ++abi_test_clobber_v6: ++.localentry abi_test_clobber_v6,0 ++ ++ vxor 6, 6, 6 ++ blr ++.size abi_test_clobber_v6,.-abi_test_clobber_v6 ++.globl abi_test_clobber_v7 ++.type abi_test_clobber_v7,@function ++.align 4 ++abi_test_clobber_v7: ++.localentry abi_test_clobber_v7,0 ++ ++ vxor 7, 7, 7 ++ blr ++.size abi_test_clobber_v7,.-abi_test_clobber_v7 ++.globl abi_test_clobber_v8 ++.type abi_test_clobber_v8,@function ++.align 4 ++abi_test_clobber_v8: ++.localentry 
abi_test_clobber_v8,0 ++ ++ vxor 8, 8, 8 ++ blr ++.size abi_test_clobber_v8,.-abi_test_clobber_v8 ++.globl abi_test_clobber_v9 ++.type abi_test_clobber_v9,@function ++.align 4 ++abi_test_clobber_v9: ++.localentry abi_test_clobber_v9,0 ++ ++ vxor 9, 9, 9 ++ blr ++.size abi_test_clobber_v9,.-abi_test_clobber_v9 ++.globl abi_test_clobber_v10 ++.type abi_test_clobber_v10,@function ++.align 4 ++abi_test_clobber_v10: ++.localentry abi_test_clobber_v10,0 ++ ++ vxor 10, 10, 10 ++ blr ++.size abi_test_clobber_v10,.-abi_test_clobber_v10 ++.globl abi_test_clobber_v11 ++.type abi_test_clobber_v11,@function ++.align 4 ++abi_test_clobber_v11: ++.localentry abi_test_clobber_v11,0 ++ ++ vxor 11, 11, 11 ++ blr ++.size abi_test_clobber_v11,.-abi_test_clobber_v11 ++.globl abi_test_clobber_v12 ++.type abi_test_clobber_v12,@function ++.align 4 ++abi_test_clobber_v12: ++.localentry abi_test_clobber_v12,0 ++ ++ vxor 12, 12, 12 ++ blr ++.size abi_test_clobber_v12,.-abi_test_clobber_v12 ++.globl abi_test_clobber_v13 ++.type abi_test_clobber_v13,@function ++.align 4 ++abi_test_clobber_v13: ++.localentry abi_test_clobber_v13,0 ++ ++ vxor 13, 13, 13 ++ blr ++.size abi_test_clobber_v13,.-abi_test_clobber_v13 ++.globl abi_test_clobber_v14 ++.type abi_test_clobber_v14,@function ++.align 4 ++abi_test_clobber_v14: ++.localentry abi_test_clobber_v14,0 ++ ++ vxor 14, 14, 14 ++ blr ++.size abi_test_clobber_v14,.-abi_test_clobber_v14 ++.globl abi_test_clobber_v15 ++.type abi_test_clobber_v15,@function ++.align 4 ++abi_test_clobber_v15: ++.localentry abi_test_clobber_v15,0 ++ ++ vxor 15, 15, 15 ++ blr ++.size abi_test_clobber_v15,.-abi_test_clobber_v15 ++.globl abi_test_clobber_v16 ++.type abi_test_clobber_v16,@function ++.align 4 ++abi_test_clobber_v16: ++.localentry abi_test_clobber_v16,0 ++ ++ vxor 16, 16, 16 ++ blr ++.size abi_test_clobber_v16,.-abi_test_clobber_v16 ++.globl abi_test_clobber_v17 ++.type abi_test_clobber_v17,@function ++.align 4 ++abi_test_clobber_v17: ++.localentry 
abi_test_clobber_v17,0 ++ ++ vxor 17, 17, 17 ++ blr ++.size abi_test_clobber_v17,.-abi_test_clobber_v17 ++.globl abi_test_clobber_v18 ++.type abi_test_clobber_v18,@function ++.align 4 ++abi_test_clobber_v18: ++.localentry abi_test_clobber_v18,0 ++ ++ vxor 18, 18, 18 ++ blr ++.size abi_test_clobber_v18,.-abi_test_clobber_v18 ++.globl abi_test_clobber_v19 ++.type abi_test_clobber_v19,@function ++.align 4 ++abi_test_clobber_v19: ++.localentry abi_test_clobber_v19,0 ++ ++ vxor 19, 19, 19 ++ blr ++.size abi_test_clobber_v19,.-abi_test_clobber_v19 ++.globl abi_test_clobber_v20 ++.type abi_test_clobber_v20,@function ++.align 4 ++abi_test_clobber_v20: ++.localentry abi_test_clobber_v20,0 ++ ++ vxor 20, 20, 20 ++ blr ++.size abi_test_clobber_v20,.-abi_test_clobber_v20 ++.globl abi_test_clobber_v21 ++.type abi_test_clobber_v21,@function ++.align 4 ++abi_test_clobber_v21: ++.localentry abi_test_clobber_v21,0 ++ ++ vxor 21, 21, 21 ++ blr ++.size abi_test_clobber_v21,.-abi_test_clobber_v21 ++.globl abi_test_clobber_v22 ++.type abi_test_clobber_v22,@function ++.align 4 ++abi_test_clobber_v22: ++.localentry abi_test_clobber_v22,0 ++ ++ vxor 22, 22, 22 ++ blr ++.size abi_test_clobber_v22,.-abi_test_clobber_v22 ++.globl abi_test_clobber_v23 ++.type abi_test_clobber_v23,@function ++.align 4 ++abi_test_clobber_v23: ++.localentry abi_test_clobber_v23,0 ++ ++ vxor 23, 23, 23 ++ blr ++.size abi_test_clobber_v23,.-abi_test_clobber_v23 ++.globl abi_test_clobber_v24 ++.type abi_test_clobber_v24,@function ++.align 4 ++abi_test_clobber_v24: ++.localentry abi_test_clobber_v24,0 ++ ++ vxor 24, 24, 24 ++ blr ++.size abi_test_clobber_v24,.-abi_test_clobber_v24 ++.globl abi_test_clobber_v25 ++.type abi_test_clobber_v25,@function ++.align 4 ++abi_test_clobber_v25: ++.localentry abi_test_clobber_v25,0 ++ ++ vxor 25, 25, 25 ++ blr ++.size abi_test_clobber_v25,.-abi_test_clobber_v25 ++.globl abi_test_clobber_v26 ++.type abi_test_clobber_v26,@function ++.align 4 ++abi_test_clobber_v26: ++.localentry 
abi_test_clobber_v26,0 ++ ++ vxor 26, 26, 26 ++ blr ++.size abi_test_clobber_v26,.-abi_test_clobber_v26 ++.globl abi_test_clobber_v27 ++.type abi_test_clobber_v27,@function ++.align 4 ++abi_test_clobber_v27: ++.localentry abi_test_clobber_v27,0 ++ ++ vxor 27, 27, 27 ++ blr ++.size abi_test_clobber_v27,.-abi_test_clobber_v27 ++.globl abi_test_clobber_v28 ++.type abi_test_clobber_v28,@function ++.align 4 ++abi_test_clobber_v28: ++.localentry abi_test_clobber_v28,0 ++ ++ vxor 28, 28, 28 ++ blr ++.size abi_test_clobber_v28,.-abi_test_clobber_v28 ++.globl abi_test_clobber_v29 ++.type abi_test_clobber_v29,@function ++.align 4 ++abi_test_clobber_v29: ++.localentry abi_test_clobber_v29,0 ++ ++ vxor 29, 29, 29 ++ blr ++.size abi_test_clobber_v29,.-abi_test_clobber_v29 ++.globl abi_test_clobber_v30 ++.type abi_test_clobber_v30,@function ++.align 4 ++abi_test_clobber_v30: ++.localentry abi_test_clobber_v30,0 ++ ++ vxor 30, 30, 30 ++ blr ++.size abi_test_clobber_v30,.-abi_test_clobber_v30 ++.globl abi_test_clobber_v31 ++.type abi_test_clobber_v31,@function ++.align 4 ++abi_test_clobber_v31: ++.localentry abi_test_clobber_v31,0 ++ ++ vxor 31, 31, 31 ++ blr ++.size abi_test_clobber_v31,.-abi_test_clobber_v31 ++.globl abi_test_clobber_cr0 ++.type abi_test_clobber_cr0,@function ++.align 4 ++abi_test_clobber_cr0: ++.localentry abi_test_clobber_cr0,0 ++ ++ ++ ++ mfcr 0 ++ not 0, 0 ++ mtcrf 128, 0 ++ blr ++.size abi_test_clobber_cr0,.-abi_test_clobber_cr0 ++.globl abi_test_clobber_cr1 ++.type abi_test_clobber_cr1,@function ++.align 4 ++abi_test_clobber_cr1: ++.localentry abi_test_clobber_cr1,0 ++ ++ ++ ++ mfcr 0 ++ not 0, 0 ++ mtcrf 64, 0 ++ blr ++.size abi_test_clobber_cr1,.-abi_test_clobber_cr1 ++.globl abi_test_clobber_cr2 ++.type abi_test_clobber_cr2,@function ++.align 4 ++abi_test_clobber_cr2: ++.localentry abi_test_clobber_cr2,0 ++ ++ ++ ++ mfcr 0 ++ not 0, 0 ++ mtcrf 32, 0 ++ blr ++.size abi_test_clobber_cr2,.-abi_test_clobber_cr2 ++.globl abi_test_clobber_cr3 ++.type 
abi_test_clobber_cr3,@function ++.align 4 ++abi_test_clobber_cr3: ++.localentry abi_test_clobber_cr3,0 ++ ++ ++ ++ mfcr 0 ++ not 0, 0 ++ mtcrf 16, 0 ++ blr ++.size abi_test_clobber_cr3,.-abi_test_clobber_cr3 ++.globl abi_test_clobber_cr4 ++.type abi_test_clobber_cr4,@function ++.align 4 ++abi_test_clobber_cr4: ++.localentry abi_test_clobber_cr4,0 ++ ++ ++ ++ mfcr 0 ++ not 0, 0 ++ mtcrf 8, 0 ++ blr ++.size abi_test_clobber_cr4,.-abi_test_clobber_cr4 ++.globl abi_test_clobber_cr5 ++.type abi_test_clobber_cr5,@function ++.align 4 ++abi_test_clobber_cr5: ++.localentry abi_test_clobber_cr5,0 ++ ++ ++ ++ mfcr 0 ++ not 0, 0 ++ mtcrf 4, 0 ++ blr ++.size abi_test_clobber_cr5,.-abi_test_clobber_cr5 ++.globl abi_test_clobber_cr6 ++.type abi_test_clobber_cr6,@function ++.align 4 ++abi_test_clobber_cr6: ++.localentry abi_test_clobber_cr6,0 ++ ++ ++ ++ mfcr 0 ++ not 0, 0 ++ mtcrf 2, 0 ++ blr ++.size abi_test_clobber_cr6,.-abi_test_clobber_cr6 ++.globl abi_test_clobber_cr7 ++.type abi_test_clobber_cr7,@function ++.align 4 ++abi_test_clobber_cr7: ++.localentry abi_test_clobber_cr7,0 ++ ++ ++ ++ mfcr 0 ++ not 0, 0 ++ mtcrf 1, 0 ++ blr ++.size abi_test_clobber_cr7,.-abi_test_clobber_cr7 ++.globl abi_test_clobber_ctr ++.type abi_test_clobber_ctr,@function ++.align 4 ++abi_test_clobber_ctr: ++.localentry abi_test_clobber_ctr,0 ++ ++ li 0, 0 ++ mtctr 0 ++ blr ++.size abi_test_clobber_ctr,.-abi_test_clobber_ctr ++ ++.globl abi_test_clobber_lr ++.type abi_test_clobber_lr,@function ++.align 4 ++abi_test_clobber_lr: ++.localentry abi_test_clobber_lr,0 ++ ++ mflr 0 ++ mtctr 0 ++ li 0, 0 ++ mtlr 0 ++ bctr ++.size abi_test_clobber_lr,.-abi_test_clobber_lr ++ ++#endif // !OPENSSL_NO_ASM && __powerpc64__ && __ELF__ ++#if defined(__ELF__) ++// See https://www.airs.com/blog/archives/518. 
++.section .note.GNU-stack,"",%progbits ++#endif diff --git a/0002-third_party-libvpx-Remove-bad-ppc64-config.patch b/0002-third_party-libvpx-Remove-bad-ppc64-config.patch new file mode 100644 index 0000000..ea60d42 --- /dev/null +++ b/0002-third_party-libvpx-Remove-bad-ppc64-config.patch @@ -0,0 +1,3016 @@ +diff --git a/third_party/libvpx/source/config/linux/ppc64/vp8_rtcd.h b/third_party/libvpx/source/config/linux/ppc64/vp8_rtcd.h +deleted file mode 100644 +index 225647f..0000000 +--- a/third_party/libvpx/source/config/linux/ppc64/vp8_rtcd.h ++++ /dev/null +@@ -1,320 +0,0 @@ +-// This file is generated. Do not edit. +-#ifndef VP8_RTCD_H_ +-#define VP8_RTCD_H_ +- +-#ifdef RTCD_C +-#define RTCD_EXTERN +-#else +-#define RTCD_EXTERN extern +-#endif +- +-/* +- * VP8 +- */ +- +-struct blockd; +-struct macroblockd; +-struct loop_filter_info; +- +-/* Encoder forward decls */ +-struct block; +-struct macroblock; +-struct variance_vtable; +-union int_mv; +-struct yv12_buffer_config; +- +-#ifdef __cplusplus +-extern "C" { +-#endif +- +-void vp8_bilinear_predict16x16_c(unsigned char* src_ptr, +- int src_pixels_per_line, +- int xoffset, +- int yoffset, +- unsigned char* dst_ptr, +- int dst_pitch); +-#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_c +- +-void vp8_bilinear_predict4x4_c(unsigned char* src_ptr, +- int src_pixels_per_line, +- int xoffset, +- int yoffset, +- unsigned char* dst_ptr, +- int dst_pitch); +-#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_c +- +-void vp8_bilinear_predict8x4_c(unsigned char* src_ptr, +- int src_pixels_per_line, +- int xoffset, +- int yoffset, +- unsigned char* dst_ptr, +- int dst_pitch); +-#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_c +- +-void vp8_bilinear_predict8x8_c(unsigned char* src_ptr, +- int src_pixels_per_line, +- int xoffset, +- int yoffset, +- unsigned char* dst_ptr, +- int dst_pitch); +-#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_c +- +-int vp8_block_error_c(short* coeff, short* 
dqcoeff); +-#define vp8_block_error vp8_block_error_c +- +-void vp8_copy32xn_c(const unsigned char* src_ptr, +- int src_stride, +- unsigned char* dst_ptr, +- int dst_stride, +- int height); +-#define vp8_copy32xn vp8_copy32xn_c +- +-void vp8_copy_mem16x16_c(unsigned char* src, +- int src_stride, +- unsigned char* dst, +- int dst_stride); +-#define vp8_copy_mem16x16 vp8_copy_mem16x16_c +- +-void vp8_copy_mem8x4_c(unsigned char* src, +- int src_stride, +- unsigned char* dst, +- int dst_stride); +-#define vp8_copy_mem8x4 vp8_copy_mem8x4_c +- +-void vp8_copy_mem8x8_c(unsigned char* src, +- int src_stride, +- unsigned char* dst, +- int dst_stride); +-#define vp8_copy_mem8x8 vp8_copy_mem8x8_c +- +-void vp8_dc_only_idct_add_c(short input_dc, +- unsigned char* pred_ptr, +- int pred_stride, +- unsigned char* dst_ptr, +- int dst_stride); +-#define vp8_dc_only_idct_add vp8_dc_only_idct_add_c +- +-int vp8_denoiser_filter_c(unsigned char* mc_running_avg_y, +- int mc_avg_y_stride, +- unsigned char* running_avg_y, +- int avg_y_stride, +- unsigned char* sig, +- int sig_stride, +- unsigned int motion_magnitude, +- int increase_denoising); +-#define vp8_denoiser_filter vp8_denoiser_filter_c +- +-int vp8_denoiser_filter_uv_c(unsigned char* mc_running_avg, +- int mc_avg_stride, +- unsigned char* running_avg, +- int avg_stride, +- unsigned char* sig, +- int sig_stride, +- unsigned int motion_magnitude, +- int increase_denoising); +-#define vp8_denoiser_filter_uv vp8_denoiser_filter_uv_c +- +-void vp8_dequant_idct_add_c(short* input, +- short* dq, +- unsigned char* dest, +- int stride); +-#define vp8_dequant_idct_add vp8_dequant_idct_add_c +- +-void vp8_dequant_idct_add_uv_block_c(short* q, +- short* dq, +- unsigned char* dst_u, +- unsigned char* dst_v, +- int stride, +- char* eobs); +-#define vp8_dequant_idct_add_uv_block vp8_dequant_idct_add_uv_block_c +- +-void vp8_dequant_idct_add_y_block_c(short* q, +- short* dq, +- unsigned char* dst, +- int stride, +- char* eobs); +-#define 
vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_c +- +-void vp8_dequantize_b_c(struct blockd*, short* DQC); +-#define vp8_dequantize_b vp8_dequantize_b_c +- +-int vp8_diamond_search_sad_c(struct macroblock* x, +- struct block* b, +- struct blockd* d, +- union int_mv* ref_mv, +- union int_mv* best_mv, +- int search_param, +- int sad_per_bit, +- int* num00, +- struct variance_vtable* fn_ptr, +- int* mvcost[2], +- union int_mv* center_mv); +-#define vp8_diamond_search_sad vp8_diamond_search_sad_c +- +-void vp8_fast_quantize_b_c(struct block*, struct blockd*); +-#define vp8_fast_quantize_b vp8_fast_quantize_b_c +- +-void vp8_filter_by_weight16x16_c(unsigned char* src, +- int src_stride, +- unsigned char* dst, +- int dst_stride, +- int src_weight); +-#define vp8_filter_by_weight16x16 vp8_filter_by_weight16x16_c +- +-void vp8_filter_by_weight4x4_c(unsigned char* src, +- int src_stride, +- unsigned char* dst, +- int dst_stride, +- int src_weight); +-#define vp8_filter_by_weight4x4 vp8_filter_by_weight4x4_c +- +-void vp8_filter_by_weight8x8_c(unsigned char* src, +- int src_stride, +- unsigned char* dst, +- int dst_stride, +- int src_weight); +-#define vp8_filter_by_weight8x8 vp8_filter_by_weight8x8_c +- +-void vp8_loop_filter_bh_c(unsigned char* y_ptr, +- unsigned char* u_ptr, +- unsigned char* v_ptr, +- int y_stride, +- int uv_stride, +- struct loop_filter_info* lfi); +-#define vp8_loop_filter_bh vp8_loop_filter_bh_c +- +-void vp8_loop_filter_bv_c(unsigned char* y_ptr, +- unsigned char* u_ptr, +- unsigned char* v_ptr, +- int y_stride, +- int uv_stride, +- struct loop_filter_info* lfi); +-#define vp8_loop_filter_bv vp8_loop_filter_bv_c +- +-void vp8_loop_filter_mbh_c(unsigned char* y_ptr, +- unsigned char* u_ptr, +- unsigned char* v_ptr, +- int y_stride, +- int uv_stride, +- struct loop_filter_info* lfi); +-#define vp8_loop_filter_mbh vp8_loop_filter_mbh_c +- +-void vp8_loop_filter_mbv_c(unsigned char* y_ptr, +- unsigned char* u_ptr, +- unsigned char* v_ptr, +- 
int y_stride, +- int uv_stride, +- struct loop_filter_info* lfi); +-#define vp8_loop_filter_mbv vp8_loop_filter_mbv_c +- +-void vp8_loop_filter_bhs_c(unsigned char* y_ptr, +- int y_stride, +- const unsigned char* blimit); +-#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_c +- +-void vp8_loop_filter_bvs_c(unsigned char* y_ptr, +- int y_stride, +- const unsigned char* blimit); +-#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_c +- +-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr, +- int y_stride, +- const unsigned char* blimit); +-#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_c +- +-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr, +- int y_stride, +- const unsigned char* blimit); +-#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_c +- +-int vp8_mbblock_error_c(struct macroblock* mb, int dc); +-#define vp8_mbblock_error vp8_mbblock_error_c +- +-int vp8_mbuverror_c(struct macroblock* mb); +-#define vp8_mbuverror vp8_mbuverror_c +- +-int vp8_refining_search_sad_c(struct macroblock* x, +- struct block* b, +- struct blockd* d, +- union int_mv* ref_mv, +- int error_per_bit, +- int search_range, +- struct variance_vtable* fn_ptr, +- int* mvcost[2], +- union int_mv* center_mv); +-#define vp8_refining_search_sad vp8_refining_search_sad_c +- +-void vp8_regular_quantize_b_c(struct block*, struct blockd*); +-#define vp8_regular_quantize_b vp8_regular_quantize_b_c +- +-void vp8_short_fdct4x4_c(short* input, short* output, int pitch); +-#define vp8_short_fdct4x4 vp8_short_fdct4x4_c +- +-void vp8_short_fdct8x4_c(short* input, short* output, int pitch); +-#define vp8_short_fdct8x4 vp8_short_fdct8x4_c +- +-void vp8_short_idct4x4llm_c(short* input, +- unsigned char* pred_ptr, +- int pred_stride, +- unsigned char* dst_ptr, +- int dst_stride); +-#define vp8_short_idct4x4llm vp8_short_idct4x4llm_c +- +-void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff); +-#define 
vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_c +- +-void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff); +-#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c +- +-void vp8_short_walsh4x4_c(short* input, short* output, int pitch); +-#define vp8_short_walsh4x4 vp8_short_walsh4x4_c +- +-void vp8_sixtap_predict16x16_c(unsigned char* src_ptr, +- int src_pixels_per_line, +- int xoffset, +- int yoffset, +- unsigned char* dst_ptr, +- int dst_pitch); +-#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_c +- +-void vp8_sixtap_predict4x4_c(unsigned char* src_ptr, +- int src_pixels_per_line, +- int xoffset, +- int yoffset, +- unsigned char* dst_ptr, +- int dst_pitch); +-#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_c +- +-void vp8_sixtap_predict8x4_c(unsigned char* src_ptr, +- int src_pixels_per_line, +- int xoffset, +- int yoffset, +- unsigned char* dst_ptr, +- int dst_pitch); +-#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_c +- +-void vp8_sixtap_predict8x8_c(unsigned char* src_ptr, +- int src_pixels_per_line, +- int xoffset, +- int yoffset, +- unsigned char* dst_ptr, +- int dst_pitch); +-#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_c +- +-void vp8_rtcd(void); +- +-#include "vpx_config.h" +- +-#ifdef RTCD_C +-#include "vpx_ports/ppc.h" +-static void setup_rtcd_internal(void) { +- int flags = ppc_simd_caps(); +- (void)flags; +-} +-#endif +- +-#ifdef __cplusplus +-} // extern "C" +-#endif +- +-#endif +diff --git a/third_party/libvpx/source/config/linux/ppc64/vp9_rtcd.h b/third_party/libvpx/source/config/linux/ppc64/vp9_rtcd.h +deleted file mode 100644 +index d0ceec1..0000000 +--- a/third_party/libvpx/source/config/linux/ppc64/vp9_rtcd.h ++++ /dev/null +@@ -1,193 +0,0 @@ +-// This file is generated. Do not edit. 
+-#ifndef VP9_RTCD_H_ +-#define VP9_RTCD_H_ +- +-#ifdef RTCD_C +-#define RTCD_EXTERN +-#else +-#define RTCD_EXTERN extern +-#endif +- +-/* +- * VP9 +- */ +- +-#include "vp9/common/vp9_common.h" +-#include "vp9/common/vp9_enums.h" +-#include "vp9/common/vp9_filter.h" +-#include "vpx/vpx_integer.h" +- +-struct macroblockd; +- +-/* Encoder forward decls */ +-struct macroblock; +-struct macroblock_plane; +-struct vp9_sad_table; +-struct ScanOrder; +-struct search_site_config; +-struct mv; +-union int_mv; +-struct yv12_buffer_config; +- +-#ifdef __cplusplus +-extern "C" { +-#endif +- +-int64_t vp9_block_error_c(const tran_low_t* coeff, +- const tran_low_t* dqcoeff, +- intptr_t block_size, +- int64_t* ssz); +-#define vp9_block_error vp9_block_error_c +- +-int64_t vp9_block_error_fp_c(const tran_low_t* coeff, +- const tran_low_t* dqcoeff, +- int block_size); +-#define vp9_block_error_fp vp9_block_error_fp_c +- +-int vp9_denoiser_filter_c(const uint8_t* sig, +- int sig_stride, +- const uint8_t* mc_avg, +- int mc_avg_stride, +- uint8_t* avg, +- int avg_stride, +- int increase_denoising, +- BLOCK_SIZE bs, +- int motion_magnitude); +-#define vp9_denoiser_filter vp9_denoiser_filter_c +- +-int vp9_diamond_search_sad_c(const struct macroblock* x, +- const struct search_site_config* cfg, +- struct mv* ref_mv, +- uint32_t start_mv_sad, +- struct mv* best_mv, +- int search_param, +- int sad_per_bit, +- int* num00, +- const struct vp9_sad_table* sad_fn_ptr, +- const struct mv* center_mv); +-#define vp9_diamond_search_sad vp9_diamond_search_sad_c +- +-void vp9_fht16x16_c(const int16_t* input, +- tran_low_t* output, +- int stride, +- int tx_type); +-#define vp9_fht16x16 vp9_fht16x16_c +- +-void vp9_fht4x4_c(const int16_t* input, +- tran_low_t* output, +- int stride, +- int tx_type); +-#define vp9_fht4x4 vp9_fht4x4_c +- +-void vp9_fht8x8_c(const int16_t* input, +- tran_low_t* output, +- int stride, +- int tx_type); +-#define vp9_fht8x8 vp9_fht8x8_c +- +-void 
vp9_filter_by_weight16x16_c(const uint8_t* src, +- int src_stride, +- uint8_t* dst, +- int dst_stride, +- int src_weight); +-#define vp9_filter_by_weight16x16 vp9_filter_by_weight16x16_c +- +-void vp9_filter_by_weight8x8_c(const uint8_t* src, +- int src_stride, +- uint8_t* dst, +- int dst_stride, +- int src_weight); +-#define vp9_filter_by_weight8x8 vp9_filter_by_weight8x8_c +- +-void vp9_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride); +-#define vp9_fwht4x4 vp9_fwht4x4_c +- +-void vp9_iht16x16_256_add_c(const tran_low_t* input, +- uint8_t* dest, +- int stride, +- int tx_type); +-void vp9_iht16x16_256_add_vsx(const tran_low_t* input, +- uint8_t* dest, +- int stride, +- int tx_type); +-#define vp9_iht16x16_256_add vp9_iht16x16_256_add_vsx +- +-void vp9_iht4x4_16_add_c(const tran_low_t* input, +- uint8_t* dest, +- int stride, +- int tx_type); +-void vp9_iht4x4_16_add_vsx(const tran_low_t* input, +- uint8_t* dest, +- int stride, +- int tx_type); +-#define vp9_iht4x4_16_add vp9_iht4x4_16_add_vsx +- +-void vp9_iht8x8_64_add_c(const tran_low_t* input, +- uint8_t* dest, +- int stride, +- int tx_type); +-void vp9_iht8x8_64_add_vsx(const tran_low_t* input, +- uint8_t* dest, +- int stride, +- int tx_type); +-#define vp9_iht8x8_64_add vp9_iht8x8_64_add_vsx +- +-void vp9_quantize_fp_c(const tran_low_t* coeff_ptr, +- intptr_t n_coeffs, +- const struct macroblock_plane* const mb_plane, +- tran_low_t* qcoeff_ptr, +- tran_low_t* dqcoeff_ptr, +- const int16_t* dequant_ptr, +- uint16_t* eob_ptr, +- const struct ScanOrder* const scan_order); +-void vp9_quantize_fp_vsx(const tran_low_t* coeff_ptr, +- intptr_t n_coeffs, +- const struct macroblock_plane* const mb_plane, +- tran_low_t* qcoeff_ptr, +- tran_low_t* dqcoeff_ptr, +- const int16_t* dequant_ptr, +- uint16_t* eob_ptr, +- const struct ScanOrder* const scan_order); +-#define vp9_quantize_fp vp9_quantize_fp_vsx +- +-void vp9_quantize_fp_32x32_c(const tran_low_t* coeff_ptr, +- intptr_t n_coeffs, +- const struct 
macroblock_plane* const mb_plane, +- tran_low_t* qcoeff_ptr, +- tran_low_t* dqcoeff_ptr, +- const int16_t* dequant_ptr, +- uint16_t* eob_ptr, +- const struct ScanOrder* const scan_order); +-void vp9_quantize_fp_32x32_vsx(const tran_low_t* coeff_ptr, +- intptr_t n_coeffs, +- const struct macroblock_plane* const mb_plane, +- tran_low_t* qcoeff_ptr, +- tran_low_t* dqcoeff_ptr, +- const int16_t* dequant_ptr, +- uint16_t* eob_ptr, +- const struct ScanOrder* const scan_order); +-#define vp9_quantize_fp_32x32 vp9_quantize_fp_32x32_vsx +- +-void vp9_scale_and_extend_frame_c(const struct yv12_buffer_config* src, +- struct yv12_buffer_config* dst, +- INTERP_FILTER filter_type, +- int phase_scaler); +-#define vp9_scale_and_extend_frame vp9_scale_and_extend_frame_c +- +-void vp9_rtcd(void); +- +-#include "vpx_config.h" +- +-#ifdef RTCD_C +-#include "vpx_ports/ppc.h" +-static void setup_rtcd_internal(void) { +- int flags = ppc_simd_caps(); +- (void)flags; +-} +-#endif +- +-#ifdef __cplusplus +-} // extern "C" +-#endif +- +-#endif +diff --git a/third_party/libvpx/source/config/linux/ppc64/vpx_config.asm b/third_party/libvpx/source/config/linux/ppc64/vpx_config.asm +deleted file mode 100644 +index cf14597..0000000 +--- a/third_party/libvpx/source/config/linux/ppc64/vpx_config.asm ++++ /dev/null +@@ -1,107 +0,0 @@ +-@ This file was created from a .asm file +-@ using the ads2gas.pl script. 
+-.syntax unified +-.equ VPX_ARCH_ARM , 0 +-.equ ARCH_ARM , 0 +-.equ VPX_ARCH_AARCH64 , 0 +-.equ ARCH_AARCH64 , 0 +-.equ VPX_ARCH_MIPS , 0 +-.equ ARCH_MIPS , 0 +-.equ VPX_ARCH_X86 , 0 +-.equ ARCH_X86 , 0 +-.equ VPX_ARCH_X86_64 , 0 +-.equ ARCH_X86_64 , 0 +-.equ VPX_ARCH_PPC , 1 +-.equ ARCH_PPC , 1 +-.equ VPX_ARCH_LOONGARCH , 0 +-.equ ARCH_LOONGARCH , 0 +-.equ HAVE_NEON_ASM , 0 +-.equ HAVE_NEON , 0 +-.equ HAVE_NEON_DOTPROD , 0 +-.equ HAVE_NEON_I8MM , 0 +-.equ HAVE_SVE , 0 +-.equ HAVE_MIPS32 , 0 +-.equ HAVE_DSPR2 , 0 +-.equ HAVE_MSA , 0 +-.equ HAVE_MIPS64 , 0 +-.equ HAVE_MMX , 0 +-.equ HAVE_SSE , 0 +-.equ HAVE_SSE2 , 0 +-.equ HAVE_SSE3 , 0 +-.equ HAVE_SSSE3 , 0 +-.equ HAVE_SSE4_1 , 0 +-.equ HAVE_AVX , 0 +-.equ HAVE_AVX2 , 0 +-.equ HAVE_AVX512 , 0 +-.equ HAVE_VSX , 0 +-.equ HAVE_MMI , 0 +-.equ HAVE_LSX , 0 +-.equ HAVE_LASX , 0 +-.equ HAVE_VPX_PORTS , 1 +-.equ HAVE_PTHREAD_H , 1 +-.equ HAVE_UNISTD_H , 0 +-.equ CONFIG_DEPENDENCY_TRACKING , 1 +-.equ CONFIG_EXTERNAL_BUILD , 1 +-.equ CONFIG_INSTALL_DOCS , 0 +-.equ CONFIG_INSTALL_BINS , 1 +-.equ CONFIG_INSTALL_LIBS , 1 +-.equ CONFIG_INSTALL_SRCS , 0 +-.equ CONFIG_DEBUG , 0 +-.equ CONFIG_GPROF , 0 +-.equ CONFIG_GCOV , 0 +-.equ CONFIG_RVCT , 0 +-.equ CONFIG_GCC , 1 +-.equ CONFIG_MSVS , 0 +-.equ CONFIG_PIC , 0 +-.equ CONFIG_BIG_ENDIAN , 0 +-.equ CONFIG_CODEC_SRCS , 0 +-.equ CONFIG_DEBUG_LIBS , 0 +-.equ CONFIG_DEQUANT_TOKENS , 0 +-.equ CONFIG_DC_RECON , 0 +-.equ CONFIG_RUNTIME_CPU_DETECT , 0 +-.equ CONFIG_POSTPROC , 1 +-.equ CONFIG_VP9_POSTPROC , 1 +-.equ CONFIG_MULTITHREAD , 1 +-.equ CONFIG_INTERNAL_STATS , 0 +-.equ CONFIG_VP8_ENCODER , 1 +-.equ CONFIG_VP8_DECODER , 1 +-.equ CONFIG_VP9_ENCODER , 1 +-.equ CONFIG_VP9_DECODER , 1 +-.equ CONFIG_VP8 , 1 +-.equ CONFIG_VP9 , 1 +-.equ CONFIG_ENCODERS , 1 +-.equ CONFIG_DECODERS , 1 +-.equ CONFIG_STATIC_MSVCRT , 0 +-.equ CONFIG_SPATIAL_RESAMPLING , 1 +-.equ CONFIG_REALTIME_ONLY , 1 +-.equ CONFIG_ONTHEFLY_BITPACKING , 0 +-.equ CONFIG_ERROR_CONCEALMENT , 0 +-.equ CONFIG_SHARED , 0 +-.equ 
CONFIG_STATIC , 1 +-.equ CONFIG_SMALL , 0 +-.equ CONFIG_POSTPROC_VISUALIZER , 0 +-.equ CONFIG_OS_SUPPORT , 1 +-.equ CONFIG_UNIT_TESTS , 1 +-.equ CONFIG_WEBM_IO , 1 +-.equ CONFIG_LIBYUV , 0 +-.equ CONFIG_DECODE_PERF_TESTS , 0 +-.equ CONFIG_ENCODE_PERF_TESTS , 0 +-.equ CONFIG_MULTI_RES_ENCODING , 1 +-.equ CONFIG_TEMPORAL_DENOISING , 1 +-.equ CONFIG_VP9_TEMPORAL_DENOISING , 1 +-.equ CONFIG_COEFFICIENT_RANGE_CHECKING , 0 +-.equ CONFIG_VP9_HIGHBITDEPTH , 0 +-.equ CONFIG_BETTER_HW_COMPATIBILITY , 0 +-.equ CONFIG_EXPERIMENTAL , 0 +-.equ CONFIG_SIZE_LIMIT , 1 +-.equ CONFIG_ALWAYS_ADJUST_BPM , 0 +-.equ CONFIG_BITSTREAM_DEBUG , 0 +-.equ CONFIG_MISMATCH_DEBUG , 0 +-.equ CONFIG_FP_MB_STATS , 0 +-.equ CONFIG_EMULATE_HARDWARE , 0 +-.equ CONFIG_NON_GREEDY_MV , 0 +-.equ CONFIG_RATE_CTRL , 0 +-.equ CONFIG_COLLECT_COMPONENT_TIMING , 0 +-.equ DECODE_WIDTH_LIMIT , 16384 +-.equ DECODE_HEIGHT_LIMIT , 16384 +- .section .note.GNU-stack,"",%progbits +diff --git a/third_party/libvpx/source/config/linux/ppc64/vpx_config.c b/third_party/libvpx/source/config/linux/ppc64/vpx_config.c +deleted file mode 100644 +index 9b870e7..0000000 +--- a/third_party/libvpx/source/config/linux/ppc64/vpx_config.c ++++ /dev/null +@@ -1,10 +0,0 @@ +-/* Copyright (c) 2011 The WebM project authors. All Rights Reserved. */ +-/* */ +-/* Use of this source code is governed by a BSD-style license */ +-/* that can be found in the LICENSE file in the root of the source */ +-/* tree. An additional intellectual property rights grant can be found */ +-/* in the file PATENTS. All contributing project authors may */ +-/* be found in the AUTHORS file in the root of the source tree. 
*/ +-#include "vpx/vpx_codec.h" +-static const char* const cfg = "--target=ppc64le-linux-gcc --enable-external-build --enable-postproc --enable-multi-res-encoding --enable-temporal-denoising --enable-vp9-temporal-denoising --enable-vp9-postproc --size-limit=16384x16384 --enable-realtime-only --disable-install-docs --disable-libyuv"; +-const char *vpx_codec_build_config(void) {return cfg;} +diff --git a/third_party/libvpx/source/config/linux/ppc64/vpx_config.h b/third_party/libvpx/source/config/linux/ppc64/vpx_config.h +deleted file mode 100644 +index 8be04cc..0000000 +--- a/third_party/libvpx/source/config/linux/ppc64/vpx_config.h ++++ /dev/null +@@ -1,116 +0,0 @@ +-/* Copyright (c) 2011 The WebM project authors. All Rights Reserved. */ +-/* */ +-/* Use of this source code is governed by a BSD-style license */ +-/* that can be found in the LICENSE file in the root of the source */ +-/* tree. An additional intellectual property rights grant can be found */ +-/* in the file PATENTS. All contributing project authors may */ +-/* be found in the AUTHORS file in the root of the source tree. */ +-/* This file automatically generated by configure. Do not edit! 
*/ +-#ifndef VPX_CONFIG_H +-#define VPX_CONFIG_H +-#define RESTRICT +-#define INLINE inline +-#define VPX_ARCH_ARM 0 +-#define ARCH_ARM 0 +-#define VPX_ARCH_AARCH64 0 +-#define ARCH_AARCH64 0 +-#define VPX_ARCH_MIPS 0 +-#define ARCH_MIPS 0 +-#define VPX_ARCH_X86 0 +-#define ARCH_X86 0 +-#define VPX_ARCH_X86_64 0 +-#define ARCH_X86_64 0 +-#define VPX_ARCH_PPC 1 +-#define ARCH_PPC 1 +-#define VPX_ARCH_LOONGARCH 0 +-#define ARCH_LOONGARCH 0 +-#define HAVE_NEON_ASM 0 +-#define HAVE_NEON 0 +-#define HAVE_NEON_DOTPROD 0 +-#define HAVE_NEON_I8MM 0 +-#define HAVE_SVE 0 +-#define HAVE_MIPS32 0 +-#define HAVE_DSPR2 0 +-#define HAVE_MSA 0 +-#define HAVE_MIPS64 0 +-#define HAVE_MMX 0 +-#define HAVE_SSE 0 +-#define HAVE_SSE2 0 +-#define HAVE_SSE3 0 +-#define HAVE_SSSE3 0 +-#define HAVE_SSE4_1 0 +-#define HAVE_AVX 0 +-#define HAVE_AVX2 0 +-#define HAVE_AVX512 0 +-#define HAVE_VSX 0 +-#define HAVE_MMI 0 +-#define HAVE_LSX 0 +-#define HAVE_LASX 0 +-#define HAVE_VPX_PORTS 1 +-#define HAVE_PTHREAD_H 1 +-#define HAVE_UNISTD_H 0 +-#define CONFIG_DEPENDENCY_TRACKING 1 +-#define CONFIG_EXTERNAL_BUILD 1 +-#define CONFIG_INSTALL_DOCS 0 +-#define CONFIG_INSTALL_BINS 1 +-#define CONFIG_INSTALL_LIBS 1 +-#define CONFIG_INSTALL_SRCS 0 +-#define CONFIG_DEBUG 0 +-#define CONFIG_GPROF 0 +-#define CONFIG_GCOV 0 +-#define CONFIG_RVCT 0 +-#define CONFIG_GCC 1 +-#define CONFIG_MSVS 0 +-#define CONFIG_PIC 0 +-#define CONFIG_BIG_ENDIAN 0 +-#define CONFIG_CODEC_SRCS 0 +-#define CONFIG_DEBUG_LIBS 0 +-#define CONFIG_DEQUANT_TOKENS 0 +-#define CONFIG_DC_RECON 0 +-#define CONFIG_RUNTIME_CPU_DETECT 0 +-#define CONFIG_POSTPROC 1 +-#define CONFIG_VP9_POSTPROC 1 +-#define CONFIG_MULTITHREAD 1 +-#define CONFIG_INTERNAL_STATS 0 +-#define CONFIG_VP8_ENCODER 1 +-#define CONFIG_VP8_DECODER 1 +-#define CONFIG_VP9_ENCODER 1 +-#define CONFIG_VP9_DECODER 1 +-#define CONFIG_VP8 1 +-#define CONFIG_VP9 1 +-#define CONFIG_ENCODERS 1 +-#define CONFIG_DECODERS 1 +-#define CONFIG_STATIC_MSVCRT 0 +-#define 
CONFIG_SPATIAL_RESAMPLING 1 +-#define CONFIG_REALTIME_ONLY 1 +-#define CONFIG_ONTHEFLY_BITPACKING 0 +-#define CONFIG_ERROR_CONCEALMENT 0 +-#define CONFIG_SHARED 0 +-#define CONFIG_STATIC 1 +-#define CONFIG_SMALL 0 +-#define CONFIG_POSTPROC_VISUALIZER 0 +-#define CONFIG_OS_SUPPORT 1 +-#define CONFIG_UNIT_TESTS 1 +-#define CONFIG_WEBM_IO 1 +-#define CONFIG_LIBYUV 0 +-#define CONFIG_DECODE_PERF_TESTS 0 +-#define CONFIG_ENCODE_PERF_TESTS 0 +-#define CONFIG_MULTI_RES_ENCODING 1 +-#define CONFIG_TEMPORAL_DENOISING 1 +-#define CONFIG_VP9_TEMPORAL_DENOISING 1 +-#define CONFIG_COEFFICIENT_RANGE_CHECKING 0 +-#define CONFIG_VP9_HIGHBITDEPTH 0 +-#define CONFIG_BETTER_HW_COMPATIBILITY 0 +-#define CONFIG_EXPERIMENTAL 0 +-#define CONFIG_SIZE_LIMIT 1 +-#define CONFIG_ALWAYS_ADJUST_BPM 0 +-#define CONFIG_BITSTREAM_DEBUG 0 +-#define CONFIG_MISMATCH_DEBUG 0 +-#define CONFIG_FP_MB_STATS 0 +-#define CONFIG_EMULATE_HARDWARE 0 +-#define CONFIG_NON_GREEDY_MV 0 +-#define CONFIG_RATE_CTRL 0 +-#define CONFIG_COLLECT_COMPONENT_TIMING 0 +-#define DECODE_WIDTH_LIMIT 16384 +-#define DECODE_HEIGHT_LIMIT 16384 +-#endif /* VPX_CONFIG_H */ +diff --git a/third_party/libvpx/source/config/linux/ppc64/vpx_dsp_rtcd.h b/third_party/libvpx/source/config/linux/ppc64/vpx_dsp_rtcd.h +deleted file mode 100644 +index 8749b43..0000000 +--- a/third_party/libvpx/source/config/linux/ppc64/vpx_dsp_rtcd.h ++++ /dev/null +@@ -1,2128 +0,0 @@ +-// This file is generated. Do not edit. 
+-#ifndef VPX_DSP_RTCD_H_ +-#define VPX_DSP_RTCD_H_ +- +-#ifdef RTCD_C +-#define RTCD_EXTERN +-#else +-#define RTCD_EXTERN extern +-#endif +- +-/* +- * DSP +- */ +- +-#include "vpx/vpx_integer.h" +-#include "vpx_dsp/vpx_dsp_common.h" +-#include "vpx_dsp/vpx_filter.h" +-#if CONFIG_VP9_ENCODER +-struct macroblock_plane; +-struct ScanOrder; +-#endif +- +-#ifdef __cplusplus +-extern "C" { +-#endif +- +-unsigned int vpx_avg_4x4_c(const uint8_t*, int p); +-#define vpx_avg_4x4 vpx_avg_4x4_c +- +-unsigned int vpx_avg_8x8_c(const uint8_t*, int p); +-#define vpx_avg_8x8 vpx_avg_8x8_c +- +-void vpx_comp_avg_pred_c(uint8_t* comp_pred, +- const uint8_t* pred, +- int width, +- int height, +- const uint8_t* ref, +- int ref_stride); +-void vpx_comp_avg_pred_vsx(uint8_t* comp_pred, +- const uint8_t* pred, +- int width, +- int height, +- const uint8_t* ref, +- int ref_stride); +-#define vpx_comp_avg_pred vpx_comp_avg_pred_vsx +- +-void vpx_convolve8_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-void vpx_convolve8_vsx(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_convolve8 vpx_convolve8_vsx +- +-void vpx_convolve8_avg_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-void vpx_convolve8_avg_vsx(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_convolve8_avg vpx_convolve8_avg_vsx +- +-void vpx_convolve8_avg_horiz_c(const uint8_t* src, +- 
ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-void vpx_convolve8_avg_horiz_vsx(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_convolve8_avg_horiz vpx_convolve8_avg_horiz_vsx +- +-void vpx_convolve8_avg_vert_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-void vpx_convolve8_avg_vert_vsx(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_convolve8_avg_vert vpx_convolve8_avg_vert_vsx +- +-void vpx_convolve8_horiz_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-void vpx_convolve8_horiz_vsx(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_convolve8_horiz vpx_convolve8_horiz_vsx +- +-void vpx_convolve8_vert_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-void vpx_convolve8_vert_vsx(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- 
int h); +-#define vpx_convolve8_vert vpx_convolve8_vert_vsx +- +-void vpx_convolve_avg_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-void vpx_convolve_avg_vsx(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_convolve_avg vpx_convolve_avg_vsx +- +-void vpx_convolve_copy_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-void vpx_convolve_copy_vsx(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_convolve_copy vpx_convolve_copy_vsx +- +-void vpx_d117_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c +- +-void vpx_d117_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c +- +-void vpx_d117_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c +- +-void vpx_d117_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c +- +-void vpx_d135_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c +- 
+-void vpx_d135_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c +- +-void vpx_d135_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c +- +-void vpx_d135_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c +- +-void vpx_d153_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c +- +-void vpx_d153_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c +- +-void vpx_d153_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c +- +-void vpx_d153_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c +- +-void vpx_d207_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c +- +-void vpx_d207_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c +- +-void vpx_d207_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c +- +-void vpx_d207_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c +- +-void 
vpx_d45_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_d45_predictor_16x16_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_vsx +- +-void vpx_d45_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_d45_predictor_32x32_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_vsx +- +-void vpx_d45_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_c +- +-void vpx_d45_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_c +- +-void vpx_d45e_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c +- +-void vpx_d63_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_d63_predictor_16x16_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_vsx +- +-void vpx_d63_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_d63_predictor_32x32_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_vsx +- +-void vpx_d63_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c +- +-void vpx_d63_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, 
+- const uint8_t* left); +-#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c +- +-void vpx_d63e_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c +- +-void vpx_dc_128_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_dc_128_predictor_16x16_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_vsx +- +-void vpx_dc_128_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_dc_128_predictor_32x32_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_vsx +- +-void vpx_dc_128_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_c +- +-void vpx_dc_128_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_c +- +-void vpx_dc_left_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_dc_left_predictor_16x16_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_vsx +- +-void vpx_dc_left_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_dc_left_predictor_32x32_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_vsx +- +-void vpx_dc_left_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- 
const uint8_t* left); +-#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_c +- +-void vpx_dc_left_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_c +- +-void vpx_dc_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_dc_predictor_16x16_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_vsx +- +-void vpx_dc_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_dc_predictor_32x32_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_vsx +- +-void vpx_dc_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_c +- +-void vpx_dc_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_c +- +-void vpx_dc_top_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_dc_top_predictor_16x16_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_vsx +- +-void vpx_dc_top_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_dc_top_predictor_32x32_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_vsx +- +-void vpx_dc_top_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define 
vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_c +- +-void vpx_dc_top_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_c +- +-void vpx_fdct16x16_c(const int16_t* input, tran_low_t* output, int stride); +-#define vpx_fdct16x16 vpx_fdct16x16_c +- +-void vpx_fdct16x16_1_c(const int16_t* input, tran_low_t* output, int stride); +-#define vpx_fdct16x16_1 vpx_fdct16x16_1_c +- +-void vpx_fdct32x32_c(const int16_t* input, tran_low_t* output, int stride); +-#define vpx_fdct32x32 vpx_fdct32x32_c +- +-void vpx_fdct32x32_1_c(const int16_t* input, tran_low_t* output, int stride); +-#define vpx_fdct32x32_1 vpx_fdct32x32_1_c +- +-void vpx_fdct32x32_rd_c(const int16_t* input, tran_low_t* output, int stride); +-void vpx_fdct32x32_rd_vsx(const int16_t* input, tran_low_t* output, int stride); +-#define vpx_fdct32x32_rd vpx_fdct32x32_rd_vsx +- +-void vpx_fdct4x4_c(const int16_t* input, tran_low_t* output, int stride); +-#define vpx_fdct4x4 vpx_fdct4x4_c +- +-void vpx_fdct4x4_1_c(const int16_t* input, tran_low_t* output, int stride); +-#define vpx_fdct4x4_1 vpx_fdct4x4_1_c +- +-void vpx_fdct8x8_c(const int16_t* input, tran_low_t* output, int stride); +-#define vpx_fdct8x8 vpx_fdct8x8_c +- +-void vpx_fdct8x8_1_c(const int16_t* input, tran_low_t* output, int stride); +-#define vpx_fdct8x8_1 vpx_fdct8x8_1_c +- +-void vpx_get16x16var_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse, +- int* sum); +-void vpx_get16x16var_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse, +- int* sum); +-#define vpx_get16x16var vpx_get16x16var_vsx +- +-unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr, +- int src_stride, +- const unsigned char* ref_ptr, +- int ref_stride); +-unsigned int vpx_get4x4sse_cs_vsx(const unsigned char* src_ptr, +- int src_stride, +- 
const unsigned char* ref_ptr, +- int ref_stride); +-#define vpx_get4x4sse_cs vpx_get4x4sse_cs_vsx +- +-void vpx_get8x8var_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse, +- int* sum); +-void vpx_get8x8var_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse, +- int* sum); +-#define vpx_get8x8var vpx_get8x8var_vsx +- +-unsigned int vpx_get_mb_ss_c(const int16_t*); +-unsigned int vpx_get_mb_ss_vsx(const int16_t*); +-#define vpx_get_mb_ss vpx_get_mb_ss_vsx +- +-void vpx_h_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_h_predictor_16x16_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_vsx +- +-void vpx_h_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_h_predictor_32x32_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_vsx +- +-void vpx_h_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_c +- +-void vpx_h_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_c +- +-void vpx_hadamard_16x16_c(const int16_t* src_diff, +- ptrdiff_t src_stride, +- int16_t* coeff); +-void vpx_hadamard_16x16_vsx(const int16_t* src_diff, +- ptrdiff_t src_stride, +- int16_t* coeff); +-#define vpx_hadamard_16x16 vpx_hadamard_16x16_vsx +- +-void vpx_hadamard_32x32_c(const int16_t* src_diff, +- ptrdiff_t src_stride, +- int16_t* coeff); +-#define vpx_hadamard_32x32 vpx_hadamard_32x32_c +- +-void vpx_hadamard_8x8_c(const int16_t* src_diff, +- ptrdiff_t src_stride, +- 
int16_t* coeff); +-void vpx_hadamard_8x8_vsx(const int16_t* src_diff, +- ptrdiff_t src_stride, +- int16_t* coeff); +-#define vpx_hadamard_8x8 vpx_hadamard_8x8_vsx +- +-void vpx_he_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c +- +-void vpx_idct16x16_10_add_c(const tran_low_t* input, uint8_t* dest, int stride); +-#define vpx_idct16x16_10_add vpx_idct16x16_10_add_c +- +-void vpx_idct16x16_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); +-#define vpx_idct16x16_1_add vpx_idct16x16_1_add_c +- +-void vpx_idct16x16_256_add_c(const tran_low_t* input, +- uint8_t* dest, +- int stride); +-void vpx_idct16x16_256_add_vsx(const tran_low_t* input, +- uint8_t* dest, +- int stride); +-#define vpx_idct16x16_256_add vpx_idct16x16_256_add_vsx +- +-void vpx_idct16x16_38_add_c(const tran_low_t* input, uint8_t* dest, int stride); +-#define vpx_idct16x16_38_add vpx_idct16x16_38_add_c +- +-void vpx_idct32x32_1024_add_c(const tran_low_t* input, +- uint8_t* dest, +- int stride); +-void vpx_idct32x32_1024_add_vsx(const tran_low_t* input, +- uint8_t* dest, +- int stride); +-#define vpx_idct32x32_1024_add vpx_idct32x32_1024_add_vsx +- +-void vpx_idct32x32_135_add_c(const tran_low_t* input, +- uint8_t* dest, +- int stride); +-#define vpx_idct32x32_135_add vpx_idct32x32_135_add_c +- +-void vpx_idct32x32_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); +-#define vpx_idct32x32_1_add vpx_idct32x32_1_add_c +- +-void vpx_idct32x32_34_add_c(const tran_low_t* input, uint8_t* dest, int stride); +-#define vpx_idct32x32_34_add vpx_idct32x32_34_add_c +- +-void vpx_idct4x4_16_add_c(const tran_low_t* input, uint8_t* dest, int stride); +-void vpx_idct4x4_16_add_vsx(const tran_low_t* input, uint8_t* dest, int stride); +-#define vpx_idct4x4_16_add vpx_idct4x4_16_add_vsx +- +-void vpx_idct4x4_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); +-#define vpx_idct4x4_1_add 
vpx_idct4x4_1_add_c +- +-void vpx_idct8x8_12_add_c(const tran_low_t* input, uint8_t* dest, int stride); +-#define vpx_idct8x8_12_add vpx_idct8x8_12_add_c +- +-void vpx_idct8x8_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); +-#define vpx_idct8x8_1_add vpx_idct8x8_1_add_c +- +-void vpx_idct8x8_64_add_c(const tran_low_t* input, uint8_t* dest, int stride); +-void vpx_idct8x8_64_add_vsx(const tran_low_t* input, uint8_t* dest, int stride); +-#define vpx_idct8x8_64_add vpx_idct8x8_64_add_vsx +- +-int16_t vpx_int_pro_col_c(const uint8_t* ref, const int width); +-#define vpx_int_pro_col vpx_int_pro_col_c +- +-void vpx_int_pro_row_c(int16_t hbuf[16], +- const uint8_t* ref, +- const int ref_stride, +- const int height); +-#define vpx_int_pro_row vpx_int_pro_row_c +- +-void vpx_iwht4x4_16_add_c(const tran_low_t* input, uint8_t* dest, int stride); +-void vpx_iwht4x4_16_add_vsx(const tran_low_t* input, uint8_t* dest, int stride); +-#define vpx_iwht4x4_16_add vpx_iwht4x4_16_add_vsx +- +-void vpx_iwht4x4_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); +-#define vpx_iwht4x4_1_add vpx_iwht4x4_1_add_c +- +-void vpx_lpf_horizontal_16_c(uint8_t* s, +- int pitch, +- const uint8_t* blimit, +- const uint8_t* limit, +- const uint8_t* thresh); +-#define vpx_lpf_horizontal_16 vpx_lpf_horizontal_16_c +- +-void vpx_lpf_horizontal_16_dual_c(uint8_t* s, +- int pitch, +- const uint8_t* blimit, +- const uint8_t* limit, +- const uint8_t* thresh); +-#define vpx_lpf_horizontal_16_dual vpx_lpf_horizontal_16_dual_c +- +-void vpx_lpf_horizontal_4_c(uint8_t* s, +- int pitch, +- const uint8_t* blimit, +- const uint8_t* limit, +- const uint8_t* thresh); +-#define vpx_lpf_horizontal_4 vpx_lpf_horizontal_4_c +- +-void vpx_lpf_horizontal_4_dual_c(uint8_t* s, +- int pitch, +- const uint8_t* blimit0, +- const uint8_t* limit0, +- const uint8_t* thresh0, +- const uint8_t* blimit1, +- const uint8_t* limit1, +- const uint8_t* thresh1); +-#define vpx_lpf_horizontal_4_dual 
vpx_lpf_horizontal_4_dual_c +- +-void vpx_lpf_horizontal_8_c(uint8_t* s, +- int pitch, +- const uint8_t* blimit, +- const uint8_t* limit, +- const uint8_t* thresh); +-#define vpx_lpf_horizontal_8 vpx_lpf_horizontal_8_c +- +-void vpx_lpf_horizontal_8_dual_c(uint8_t* s, +- int pitch, +- const uint8_t* blimit0, +- const uint8_t* limit0, +- const uint8_t* thresh0, +- const uint8_t* blimit1, +- const uint8_t* limit1, +- const uint8_t* thresh1); +-#define vpx_lpf_horizontal_8_dual vpx_lpf_horizontal_8_dual_c +- +-void vpx_lpf_vertical_16_c(uint8_t* s, +- int pitch, +- const uint8_t* blimit, +- const uint8_t* limit, +- const uint8_t* thresh); +-#define vpx_lpf_vertical_16 vpx_lpf_vertical_16_c +- +-void vpx_lpf_vertical_16_dual_c(uint8_t* s, +- int pitch, +- const uint8_t* blimit, +- const uint8_t* limit, +- const uint8_t* thresh); +-#define vpx_lpf_vertical_16_dual vpx_lpf_vertical_16_dual_c +- +-void vpx_lpf_vertical_4_c(uint8_t* s, +- int pitch, +- const uint8_t* blimit, +- const uint8_t* limit, +- const uint8_t* thresh); +-#define vpx_lpf_vertical_4 vpx_lpf_vertical_4_c +- +-void vpx_lpf_vertical_4_dual_c(uint8_t* s, +- int pitch, +- const uint8_t* blimit0, +- const uint8_t* limit0, +- const uint8_t* thresh0, +- const uint8_t* blimit1, +- const uint8_t* limit1, +- const uint8_t* thresh1); +-#define vpx_lpf_vertical_4_dual vpx_lpf_vertical_4_dual_c +- +-void vpx_lpf_vertical_8_c(uint8_t* s, +- int pitch, +- const uint8_t* blimit, +- const uint8_t* limit, +- const uint8_t* thresh); +-#define vpx_lpf_vertical_8 vpx_lpf_vertical_8_c +- +-void vpx_lpf_vertical_8_dual_c(uint8_t* s, +- int pitch, +- const uint8_t* blimit0, +- const uint8_t* limit0, +- const uint8_t* thresh0, +- const uint8_t* blimit1, +- const uint8_t* limit1, +- const uint8_t* thresh1); +-#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_c +- +-void vpx_mbpost_proc_across_ip_c(unsigned char* src, +- int pitch, +- int rows, +- int cols, +- int flimit); +-void vpx_mbpost_proc_across_ip_vsx(unsigned 
char* src, +- int pitch, +- int rows, +- int cols, +- int flimit); +-#define vpx_mbpost_proc_across_ip vpx_mbpost_proc_across_ip_vsx +- +-void vpx_mbpost_proc_down_c(unsigned char* dst, +- int pitch, +- int rows, +- int cols, +- int flimit); +-void vpx_mbpost_proc_down_vsx(unsigned char* dst, +- int pitch, +- int rows, +- int cols, +- int flimit); +-#define vpx_mbpost_proc_down vpx_mbpost_proc_down_vsx +- +-void vpx_minmax_8x8_c(const uint8_t* s, +- int p, +- const uint8_t* d, +- int dp, +- int* min, +- int* max); +-#define vpx_minmax_8x8 vpx_minmax_8x8_c +- +-unsigned int vpx_mse16x16_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_mse16x16_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_mse16x16 vpx_mse16x16_vsx +- +-unsigned int vpx_mse16x8_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_mse16x8_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_mse16x8 vpx_mse16x8_vsx +- +-unsigned int vpx_mse8x16_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_mse8x16_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_mse8x16 vpx_mse8x16_vsx +- +-unsigned int vpx_mse8x8_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_mse8x8_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_mse8x8 vpx_mse8x8_vsx +- +-void vpx_plane_add_noise_c(uint8_t* start, +- const int8_t* noise, +- int blackclamp, +- int whiteclamp, +- int width, +- int 
height, +- int pitch); +-#define vpx_plane_add_noise vpx_plane_add_noise_c +- +-void vpx_post_proc_down_and_across_mb_row_c(unsigned char* src, +- unsigned char* dst, +- int src_pitch, +- int dst_pitch, +- int cols, +- unsigned char* flimits, +- int size); +-void vpx_post_proc_down_and_across_mb_row_vsx(unsigned char* src, +- unsigned char* dst, +- int src_pitch, +- int dst_pitch, +- int cols, +- unsigned char* flimits, +- int size); +-#define vpx_post_proc_down_and_across_mb_row \ +- vpx_post_proc_down_and_across_mb_row_vsx +- +-void vpx_quantize_b_c(const tran_low_t* coeff_ptr, +- intptr_t n_coeffs, +- const struct macroblock_plane* const mb_plane, +- tran_low_t* qcoeff_ptr, +- tran_low_t* dqcoeff_ptr, +- const int16_t* dequant_ptr, +- uint16_t* eob_ptr, +- const struct ScanOrder* const scan_order); +-void vpx_quantize_b_vsx(const tran_low_t* coeff_ptr, +- intptr_t n_coeffs, +- const struct macroblock_plane* const mb_plane, +- tran_low_t* qcoeff_ptr, +- tran_low_t* dqcoeff_ptr, +- const int16_t* dequant_ptr, +- uint16_t* eob_ptr, +- const struct ScanOrder* const scan_order); +-#define vpx_quantize_b vpx_quantize_b_vsx +- +-void vpx_quantize_b_32x32_c(const tran_low_t* coeff_ptr, +- const struct macroblock_plane* const mb_plane, +- tran_low_t* qcoeff_ptr, +- tran_low_t* dqcoeff_ptr, +- const int16_t* dequant_ptr, +- uint16_t* eob_ptr, +- const struct ScanOrder* const scan_order); +-void vpx_quantize_b_32x32_vsx(const tran_low_t* coeff_ptr, +- const struct macroblock_plane* const mb_plane, +- tran_low_t* qcoeff_ptr, +- tran_low_t* dqcoeff_ptr, +- const int16_t* dequant_ptr, +- uint16_t* eob_ptr, +- const struct ScanOrder* const scan_order); +-#define vpx_quantize_b_32x32 vpx_quantize_b_32x32_vsx +- +-unsigned int vpx_sad16x16_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-unsigned int vpx_sad16x16_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad16x16 
vpx_sad16x16_vsx +- +-unsigned int vpx_sad16x16_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-unsigned int vpx_sad16x16_avg_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define vpx_sad16x16_avg vpx_sad16x16_avg_vsx +- +-void vpx_sad16x16x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-void vpx_sad16x16x4d_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad16x16x4d vpx_sad16x16x4d_vsx +- +-unsigned int vpx_sad16x32_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-unsigned int vpx_sad16x32_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad16x32 vpx_sad16x32_vsx +- +-unsigned int vpx_sad16x32_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-unsigned int vpx_sad16x32_avg_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define vpx_sad16x32_avg vpx_sad16x32_avg_vsx +- +-void vpx_sad16x32x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-void vpx_sad16x32x4d_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad16x32x4d vpx_sad16x32x4d_vsx +- +-unsigned int vpx_sad16x8_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-unsigned int vpx_sad16x8_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define 
vpx_sad16x8 vpx_sad16x8_vsx +- +-unsigned int vpx_sad16x8_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-unsigned int vpx_sad16x8_avg_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define vpx_sad16x8_avg vpx_sad16x8_avg_vsx +- +-void vpx_sad16x8x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-void vpx_sad16x8x4d_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad16x8x4d vpx_sad16x8x4d_vsx +- +-unsigned int vpx_sad32x16_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-unsigned int vpx_sad32x16_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad32x16 vpx_sad32x16_vsx +- +-unsigned int vpx_sad32x16_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-unsigned int vpx_sad32x16_avg_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define vpx_sad32x16_avg vpx_sad32x16_avg_vsx +- +-void vpx_sad32x16x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-void vpx_sad32x16x4d_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad32x16x4d vpx_sad32x16x4d_vsx +- +-unsigned int vpx_sad32x32_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-unsigned int vpx_sad32x32_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define 
vpx_sad32x32 vpx_sad32x32_vsx +- +-unsigned int vpx_sad32x32_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-unsigned int vpx_sad32x32_avg_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define vpx_sad32x32_avg vpx_sad32x32_avg_vsx +- +-void vpx_sad32x32x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-void vpx_sad32x32x4d_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad32x32x4d vpx_sad32x32x4d_vsx +- +-unsigned int vpx_sad32x64_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-unsigned int vpx_sad32x64_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad32x64 vpx_sad32x64_vsx +- +-unsigned int vpx_sad32x64_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-unsigned int vpx_sad32x64_avg_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define vpx_sad32x64_avg vpx_sad32x64_avg_vsx +- +-void vpx_sad32x64x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-void vpx_sad32x64x4d_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad32x64x4d vpx_sad32x64x4d_vsx +- +-unsigned int vpx_sad4x4_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad4x4 vpx_sad4x4_c +- +-unsigned int vpx_sad4x4_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const 
uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define vpx_sad4x4_avg vpx_sad4x4_avg_c +- +-void vpx_sad4x4x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad4x4x4d vpx_sad4x4x4d_c +- +-unsigned int vpx_sad4x8_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad4x8 vpx_sad4x8_c +- +-unsigned int vpx_sad4x8_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define vpx_sad4x8_avg vpx_sad4x8_avg_c +- +-void vpx_sad4x8x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad4x8x4d vpx_sad4x8x4d_c +- +-unsigned int vpx_sad64x32_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-unsigned int vpx_sad64x32_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad64x32 vpx_sad64x32_vsx +- +-unsigned int vpx_sad64x32_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-unsigned int vpx_sad64x32_avg_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define vpx_sad64x32_avg vpx_sad64x32_avg_vsx +- +-void vpx_sad64x32x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-void vpx_sad64x32x4d_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad64x32x4d vpx_sad64x32x4d_vsx +- +-unsigned int vpx_sad64x64_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-unsigned int 
vpx_sad64x64_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad64x64 vpx_sad64x64_vsx +- +-unsigned int vpx_sad64x64_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-unsigned int vpx_sad64x64_avg_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define vpx_sad64x64_avg vpx_sad64x64_avg_vsx +- +-void vpx_sad64x64x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-void vpx_sad64x64x4d_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad64x64x4d vpx_sad64x64x4d_vsx +- +-unsigned int vpx_sad8x16_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-unsigned int vpx_sad8x16_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad8x16 vpx_sad8x16_vsx +- +-unsigned int vpx_sad8x16_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define vpx_sad8x16_avg vpx_sad8x16_avg_c +- +-void vpx_sad8x16x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad8x16x4d vpx_sad8x16x4d_c +- +-unsigned int vpx_sad8x4_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-unsigned int vpx_sad8x4_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad8x4 vpx_sad8x4_vsx +- +-unsigned int vpx_sad8x4_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define 
vpx_sad8x4_avg vpx_sad8x4_avg_c +- +-void vpx_sad8x4x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad8x4x4d vpx_sad8x4x4d_c +- +-unsigned int vpx_sad8x8_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-unsigned int vpx_sad8x8_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad8x8 vpx_sad8x8_vsx +- +-unsigned int vpx_sad8x8_avg_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- const uint8_t* second_pred); +-#define vpx_sad8x8_avg vpx_sad8x8_avg_c +- +-void vpx_sad8x8x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad8x8x4d vpx_sad8x8x4d_c +- +-unsigned int vpx_sad_skip_16x16_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_16x16 vpx_sad_skip_16x16_c +- +-void vpx_sad_skip_16x16x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad_skip_16x16x4d vpx_sad_skip_16x16x4d_c +- +-unsigned int vpx_sad_skip_16x32_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_16x32 vpx_sad_skip_16x32_c +- +-void vpx_sad_skip_16x32x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad_skip_16x32x4d vpx_sad_skip_16x32x4d_c +- +-unsigned int vpx_sad_skip_16x8_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_16x8 vpx_sad_skip_16x8_c +- +-void vpx_sad_skip_16x8x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- 
uint32_t sad_array[4]); +-#define vpx_sad_skip_16x8x4d vpx_sad_skip_16x8x4d_c +- +-unsigned int vpx_sad_skip_32x16_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_32x16 vpx_sad_skip_32x16_c +- +-void vpx_sad_skip_32x16x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad_skip_32x16x4d vpx_sad_skip_32x16x4d_c +- +-unsigned int vpx_sad_skip_32x32_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_32x32 vpx_sad_skip_32x32_c +- +-void vpx_sad_skip_32x32x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad_skip_32x32x4d vpx_sad_skip_32x32x4d_c +- +-unsigned int vpx_sad_skip_32x64_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_32x64 vpx_sad_skip_32x64_c +- +-void vpx_sad_skip_32x64x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad_skip_32x64x4d vpx_sad_skip_32x64x4d_c +- +-unsigned int vpx_sad_skip_4x4_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_4x4 vpx_sad_skip_4x4_c +- +-void vpx_sad_skip_4x4x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad_skip_4x4x4d vpx_sad_skip_4x4x4d_c +- +-unsigned int vpx_sad_skip_4x8_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_4x8 vpx_sad_skip_4x8_c +- +-void vpx_sad_skip_4x8x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define 
vpx_sad_skip_4x8x4d vpx_sad_skip_4x8x4d_c +- +-unsigned int vpx_sad_skip_64x32_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_64x32 vpx_sad_skip_64x32_c +- +-void vpx_sad_skip_64x32x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad_skip_64x32x4d vpx_sad_skip_64x32x4d_c +- +-unsigned int vpx_sad_skip_64x64_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_64x64 vpx_sad_skip_64x64_c +- +-void vpx_sad_skip_64x64x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad_skip_64x64x4d vpx_sad_skip_64x64x4d_c +- +-unsigned int vpx_sad_skip_8x16_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_8x16 vpx_sad_skip_8x16_c +- +-void vpx_sad_skip_8x16x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad_skip_8x16x4d vpx_sad_skip_8x16x4d_c +- +-unsigned int vpx_sad_skip_8x4_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_8x4 vpx_sad_skip_8x4_c +- +-void vpx_sad_skip_8x4x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad_skip_8x4x4d vpx_sad_skip_8x4x4d_c +- +-unsigned int vpx_sad_skip_8x8_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride); +-#define vpx_sad_skip_8x8 vpx_sad_skip_8x8_c +- +-void vpx_sad_skip_8x8x4d_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* const ref_array[4], +- int ref_stride, +- uint32_t sad_array[4]); +-#define vpx_sad_skip_8x8x4d vpx_sad_skip_8x8x4d_c +- 
+-int vpx_satd_c(const int16_t* coeff, int length); +-#define vpx_satd vpx_satd_c +- +-void vpx_scaled_2d_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_scaled_2d vpx_scaled_2d_c +- +-void vpx_scaled_avg_2d_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_scaled_avg_2d vpx_scaled_avg_2d_c +- +-void vpx_scaled_avg_horiz_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_scaled_avg_horiz vpx_scaled_avg_horiz_c +- +-void vpx_scaled_avg_vert_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_scaled_avg_vert vpx_scaled_avg_vert_c +- +-void vpx_scaled_horiz_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_scaled_horiz vpx_scaled_horiz_c +- +-void vpx_scaled_vert_c(const uint8_t* src, +- ptrdiff_t src_stride, +- uint8_t* dst, +- ptrdiff_t dst_stride, +- const InterpKernel* filter, +- int x0_q4, +- int x_step_q4, +- int y0_q4, +- int y_step_q4, +- int w, +- int h); +-#define vpx_scaled_vert vpx_scaled_vert_c +- +-int64_t vpx_sse_c(const uint8_t* src, +- int src_stride, +- const uint8_t* ref, +- int ref_stride, +- int width, +- int height); +-#define vpx_sse vpx_sse_c +- +-uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr, +- 
int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_c +- +-uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_c +- +-uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_c +- +-uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_c +- +-uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_c +- +-uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_c +- +-uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_c +- 
+-uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_c +- +-uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_c +- +-uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_c +- +-uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_c +- +-uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_c +- +-uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse, +- const uint8_t* second_pred); +-#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_c +- +-uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define vpx_sub_pixel_variance16x16 
vpx_sub_pixel_variance16x16_c +- +-uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_c +- +-uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_c +- +-uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_c +- +-uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_c +- +-uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_c +- +-uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_c +- +-uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_c +- +-uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define 
vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_c +- +-uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_c +- +-uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_c +- +-uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_c +- +-uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr, +- int src_stride, +- int x_offset, +- int y_offset, +- const uint8_t* ref_ptr, +- int ref_stride, +- uint32_t* sse); +-#define vpx_sub_pixel_variance8x8 vpx_sub_pixel_variance8x8_c +- +-void vpx_subtract_block_c(int rows, +- int cols, +- int16_t* diff_ptr, +- ptrdiff_t diff_stride, +- const uint8_t* src_ptr, +- ptrdiff_t src_stride, +- const uint8_t* pred_ptr, +- ptrdiff_t pred_stride); +-void vpx_subtract_block_vsx(int rows, +- int cols, +- int16_t* diff_ptr, +- ptrdiff_t diff_stride, +- const uint8_t* src_ptr, +- ptrdiff_t src_stride, +- const uint8_t* pred_ptr, +- ptrdiff_t pred_stride); +-#define vpx_subtract_block vpx_subtract_block_vsx +- +-uint64_t vpx_sum_squares_2d_i16_c(const int16_t* src, int stride, int size); +-#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_c +- +-void vpx_tm_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_tm_predictor_16x16_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_vsx +- +-void 
vpx_tm_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_tm_predictor_32x32_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_vsx +- +-void vpx_tm_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_c +- +-void vpx_tm_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_c +- +-void vpx_v_predictor_16x16_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_v_predictor_16x16_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_vsx +- +-void vpx_v_predictor_32x32_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-void vpx_v_predictor_32x32_vsx(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_vsx +- +-void vpx_v_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_c +- +-void vpx_v_predictor_8x8_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_c +- +-unsigned int vpx_variance16x16_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance16x16_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_variance16x16 vpx_variance16x16_vsx +- +-unsigned int vpx_variance16x32_c(const uint8_t* src_ptr, +- int src_stride, +- const 
uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance16x32_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_variance16x32 vpx_variance16x32_vsx +- +-unsigned int vpx_variance16x8_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance16x8_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_variance16x8 vpx_variance16x8_vsx +- +-unsigned int vpx_variance32x16_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance32x16_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_variance32x16 vpx_variance32x16_vsx +- +-unsigned int vpx_variance32x32_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance32x32_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_variance32x32 vpx_variance32x32_vsx +- +-unsigned int vpx_variance32x64_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance32x64_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_variance32x64 vpx_variance32x64_vsx +- +-unsigned int vpx_variance4x4_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance4x4_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_variance4x4 vpx_variance4x4_vsx +- 
+-unsigned int vpx_variance4x8_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance4x8_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_variance4x8 vpx_variance4x8_vsx +- +-unsigned int vpx_variance64x32_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance64x32_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_variance64x32 vpx_variance64x32_vsx +- +-unsigned int vpx_variance64x64_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance64x64_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_variance64x64 vpx_variance64x64_vsx +- +-unsigned int vpx_variance8x16_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance8x16_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_variance8x16 vpx_variance8x16_vsx +- +-unsigned int vpx_variance8x4_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance8x4_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-#define vpx_variance8x4 vpx_variance8x4_vsx +- +-unsigned int vpx_variance8x8_c(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- unsigned int* sse); +-unsigned int vpx_variance8x8_vsx(const uint8_t* src_ptr, +- int src_stride, +- const uint8_t* ref_ptr, +- int ref_stride, +- 
unsigned int* sse); +-#define vpx_variance8x8 vpx_variance8x8_vsx +- +-void vpx_ve_predictor_4x4_c(uint8_t* dst, +- ptrdiff_t stride, +- const uint8_t* above, +- const uint8_t* left); +-#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c +- +-int vpx_vector_var_c(const int16_t* ref, const int16_t* src, const int bwl); +-#define vpx_vector_var vpx_vector_var_c +- +-void vpx_dsp_rtcd(void); +- +-#include "vpx_config.h" +- +-#ifdef RTCD_C +-#include "vpx_ports/ppc.h" +-static void setup_rtcd_internal(void) { +- int flags = ppc_simd_caps(); +- (void)flags; +-} +-#endif +- +-#ifdef __cplusplus +-} // extern "C" +-#endif +- +-#endif +diff --git a/third_party/libvpx/source/config/linux/ppc64/vpx_scale_rtcd.h b/third_party/libvpx/source/config/linux/ppc64/vpx_scale_rtcd.h +deleted file mode 100644 +index 37be459..0000000 +--- a/third_party/libvpx/source/config/linux/ppc64/vpx_scale_rtcd.h ++++ /dev/null +@@ -1,100 +0,0 @@ +-// This file is generated. Do not edit. +-#ifndef VPX_SCALE_RTCD_H_ +-#define VPX_SCALE_RTCD_H_ +- +-#ifdef RTCD_C +-#define RTCD_EXTERN +-#else +-#define RTCD_EXTERN extern +-#endif +- +-struct yv12_buffer_config; +- +-#ifdef __cplusplus +-extern "C" { +-#endif +- +-void vp8_horizontal_line_2_1_scale_c(const unsigned char* source, +- unsigned int source_width, +- unsigned char* dest, +- unsigned int dest_width); +-#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c +- +-void vp8_horizontal_line_5_3_scale_c(const unsigned char* source, +- unsigned int source_width, +- unsigned char* dest, +- unsigned int dest_width); +-#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c +- +-void vp8_horizontal_line_5_4_scale_c(const unsigned char* source, +- unsigned int source_width, +- unsigned char* dest, +- unsigned int dest_width); +-#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c +- +-void vp8_vertical_band_2_1_scale_c(unsigned char* source, +- unsigned int src_pitch, +- unsigned char* dest, +- unsigned 
int dest_pitch, +- unsigned int dest_width); +-#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c +- +-void vp8_vertical_band_2_1_scale_i_c(unsigned char* source, +- unsigned int src_pitch, +- unsigned char* dest, +- unsigned int dest_pitch, +- unsigned int dest_width); +-#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c +- +-void vp8_vertical_band_5_3_scale_c(unsigned char* source, +- unsigned int src_pitch, +- unsigned char* dest, +- unsigned int dest_pitch, +- unsigned int dest_width); +-#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c +- +-void vp8_vertical_band_5_4_scale_c(unsigned char* source, +- unsigned int src_pitch, +- unsigned char* dest, +- unsigned int dest_pitch, +- unsigned int dest_width); +-#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c +- +-void vp8_yv12_copy_frame_c(const struct yv12_buffer_config* src_ybc, +- struct yv12_buffer_config* dst_ybc); +-#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c +- +-void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config* ybf); +-#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c +- +-void vpx_extend_frame_borders_c(struct yv12_buffer_config* ybf); +-#define vpx_extend_frame_borders vpx_extend_frame_borders_c +- +-void vpx_extend_frame_inner_borders_c(struct yv12_buffer_config* ybf); +-#define vpx_extend_frame_inner_borders vpx_extend_frame_inner_borders_c +- +-void vpx_yv12_copy_frame_c(const struct yv12_buffer_config* src_ybc, +- struct yv12_buffer_config* dst_ybc); +-#define vpx_yv12_copy_frame vpx_yv12_copy_frame_c +- +-void vpx_yv12_copy_y_c(const struct yv12_buffer_config* src_ybc, +- struct yv12_buffer_config* dst_ybc); +-#define vpx_yv12_copy_y vpx_yv12_copy_y_c +- +-void vpx_scale_rtcd(void); +- +-#include "vpx_config.h" +- +-#ifdef RTCD_C +-#include "vpx_ports/ppc.h" +-static void setup_rtcd_internal(void) { +- int flags = ppc_simd_caps(); +- (void)flags; +-} +-#endif +- +-#ifdef __cplusplus +-} // 
extern "C" +-#endif +- +-#endif diff --git a/0002-third_party-lss-kernel-structs.patch b/0002-third_party-lss-kernel-structs.patch new file mode 100644 index 0000000..d68e340 --- /dev/null +++ b/0002-third_party-lss-kernel-structs.patch @@ -0,0 +1,168 @@ +Index: chromium-120.0.6099.71/third_party/lss/linux_syscall_support.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/lss/linux_syscall_support.h ++++ chromium-120.0.6099.71/third_party/lss/linux_syscall_support.h +@@ -87,7 +87,7 @@ + * Porting to other related platforms should not be difficult. + */ + #if (defined(__i386__) || defined(__x86_64__) || defined(__ARM_ARCH_3__) || \ +- defined(__mips__) || defined(__PPC__) || defined(__ARM_EABI__) || \ ++ defined(__mips__) || defined(__PPC__) || defined(__powerpc64__) || defined(__ARM_EABI__) || \ + defined(__aarch64__) || defined(__s390__) || defined(__e2k__) || \ + (defined(__riscv) && __riscv_xlen == 64) || defined(__loongarch_lp64)) \ + && (defined(__linux) || defined(__ANDROID__)) +@@ -399,7 +399,7 @@ struct kernel_stat64 { + unsigned __pad2; + unsigned long long st_blocks; + }; +-#elif defined __PPC__ ++#elif defined __PPC__ && !defined(__powerpc64__) + struct kernel_stat64 { + unsigned long long st_dev; + unsigned long long st_ino; +@@ -421,6 +421,28 @@ struct kernel_stat64 { + unsigned long __unused4; + unsigned long __unused5; + }; ++#elif defined(__powerpc64__) ++struct kernel_stat64 { ++ unsigned long int st_dev; ++ unsigned long int st_ino; ++ unsigned int st_mode; ++ unsigned long int st_nlink; ++ unsigned int st_uid; ++ unsigned int st_gid; ++ unsigned long int st_rdev; ++ unsigned short int __pad2; ++ long int st_size; ++ long int st_blksize; ++ long int st_blocks; ++ long int st_atime_; ++ unsigned long int st_atime_nsec_; ++ long int st_mtime_; ++ unsigned long int st_mtime_nsec_; ++ long int st_ctime_; ++ unsigned long int st_ctime_nsec_; ++ unsigned long int __unused4; ++ unsigned 
long int __unused5; ++}; + #elif defined(__e2k__) + struct kernel_stat64 { + unsigned long long st_dev; +@@ -537,7 +559,7 @@ struct kernel_stat { + uint64_t st_ctime_nsec_; + int64_t __unused4[3]; + }; +-#elif defined(__PPC__) ++#elif defined(__PPC__) && !defined(__powerpc64__) + typedef unsigned long kernel_blkcnt_t; + typedef unsigned long kernel_blksize_t; + typedef unsigned kernel_dev_t; +@@ -568,6 +590,37 @@ struct kernel_stat { + unsigned long __unused4; + unsigned long __unused5; + }; ++#elif defined(__powerpc64__) ++typedef long int kernel_blkcnt_t; ++typedef long int kernel_blksize_t; ++typedef unsigned long int kernel_dev_t; ++typedef unsigned int kernel_gid_t; ++typedef unsigned long int kernel_ino_t; ++typedef unsigned int kernel_mode_t; ++typedef unsigned long int kernel_nlink_t; ++typedef long int kernel_off_t; ++typedef long int kernel_time_t; ++typedef unsigned int kernel_uid_t; ++struct kernel_stat { ++ kernel_dev_t st_dev; ++ kernel_ino_t st_ino; ++ kernel_mode_t st_mode; ++ kernel_nlink_t st_nlink; ++ kernel_uid_t st_uid; ++ kernel_gid_t st_gid; ++ kernel_dev_t st_rdev; ++ kernel_off_t st_size; ++ kernel_blksize_t st_blksize; ++ kernel_blkcnt_t st_blocks; ++ kernel_time_t st_atime_; ++ unsigned long int st_atime_nsec_; ++ kernel_time_t st_mtime_; ++ unsigned long int st_mtime_nsec_; ++ kernel_time_t st_ctime_; ++ unsigned long int st_ctime_nsec_; ++ unsigned long int __unused4; ++ unsigned long int __unused5; ++}; + #elif (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI64) + typedef int kernel_blkcnt_t; + typedef int kernel_blksize_t; +@@ -1824,6 +1877,28 @@ struct kernel_statx { + #ifndef __NR_getcpu + #define __NR_getcpu 302 + #endif ++ ++/* Linux commit 86250b9d12caa1a3dee12a7cf638b7dd70eaadb6 (2010) adds ++ * direct socket system calls to PPC */ ++#ifndef __NR_socket ++#define __NR_socket 326 ++#endif ++#ifndef __NR_socketpair ++#define __NR_socketpair 333 ++#endif ++#ifndef __NR_sendto ++#define __NR_sendto 335 ++#endif ++#ifndef 
__NR_shutdown ++#define __NR_shutdown 338 ++#endif ++#ifndef __NR_sendmsg ++#define __NR_sendmsg 341 ++#endif ++#ifndef __NR_recvmsg ++#define __NR_recvmsg 342 ++#endif ++ + /* End of powerpc defininitions */ + #elif defined(__s390__) + #ifndef __NR_quotactl +@@ -3363,6 +3438,11 @@ struct kernel_statx { + /* TODO(csilvers): consider wrapping some args up in a struct, like we + * do for i386's _syscall6, so we can compile successfully on gcc 2.95 + */ ++ #ifdef __powerpc64__ ++ /* TODO: implement clone() for ppc64. ++ * until then, use system libc */ ++ #define sys_clone clone ++ #else + LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, + int flags, void *arg, int *parent_tidptr, + void *newtls, int *child_tidptr) { +@@ -3433,6 +3513,7 @@ struct kernel_statx { + } + LSS_RETURN(int, __ret, __err); + } ++ #endif + #elif defined(__s390__) + #undef LSS_REG + #define LSS_REG(r, a) register unsigned long __r##r __asm__("r"#r) = (unsigned long) a +@@ -4783,7 +4864,7 @@ struct kernel_statx { + LSS_INLINE _syscall6(void*, mmap, void*, addr, size_t, length, int, prot, + int, flags, int, fd, int64_t, offset) + #endif +- #if defined(__PPC__) ++ #if defined(__PPC__) && !defined(__powerpc64__) + #undef LSS_SC_LOADARGS_0 + #define LSS_SC_LOADARGS_0(dummy...) 
+ #undef LSS_SC_LOADARGS_1 +@@ -5173,7 +5254,11 @@ struct kernel_statx { + #endif + + #if !defined(__NR_pipe) ++#if defined(__powerpc64__) ++ LSS_INLINE pid_t LSS_NAME(pipe)(int *pipefd) { ++#else + LSS_INLINE int LSS_NAME(pipe)(int *pipefd) { ++#endif + return LSS_NAME(pipe2)(pipefd, 0); + } + #endif diff --git a/0003-sandbox-linux-system_headers-Update-linux-seccomp-he.patch b/0003-sandbox-linux-system_headers-Update-linux-seccomp-he.patch new file mode 100644 index 0000000..a215adb --- /dev/null +++ b/0003-sandbox-linux-system_headers-Update-linux-seccomp-he.patch @@ -0,0 +1,37 @@ +From cca78240860abb63bbcfe94d1e5f04a1f23c527d Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Thu, 9 Aug 2018 19:11:56 -0500 +Subject: [PATCH 3/4] sandbox/linux/system_headers: Update linux seccomp header + for ppc64 + +--- + sandbox/linux/system_headers/linux_seccomp.h | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +Index: chromium-120.0.6099.71/sandbox/linux/system_headers/linux_seccomp.h +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/system_headers/linux_seccomp.h ++++ chromium-120.0.6099.71/sandbox/linux/system_headers/linux_seccomp.h +@@ -38,6 +38,9 @@ + #ifndef EM_AARCH64 + #define EM_AARCH64 183 + #endif ++#ifndef EM_PPC64 ++#define EM_PPC64 21 ++#endif + + #ifndef __AUDIT_ARCH_64BIT + #define __AUDIT_ARCH_64BIT 0x80000000 +@@ -70,6 +73,12 @@ + #ifndef AUDIT_ARCH_AARCH64 + #define AUDIT_ARCH_AARCH64 (EM_AARCH64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE) + #endif ++#ifndef AUDIT_ARCH_PPC64 ++#define AUDIT_ARCH_PPC64 (EM_PPC64 | __AUDIT_ARCH_64BIT) ++#endif ++#ifndef AUDIT_ARCH_PPC64LE ++#define AUDIT_ARCH_PPC64LE (EM_PPC64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE) ++#endif + + // For prctl.h + #ifndef PR_SET_SECCOMP diff --git a/0003-third_party-ffmpeg-Add-ppc64-generated-config.patch b/0003-third_party-ffmpeg-Add-ppc64-generated-config.patch new file mode 100644 index 0000000..a2bef0b --- /dev/null +++ 
b/0003-third_party-ffmpeg-Add-ppc64-generated-config.patch @@ -0,0 +1,10374 @@ +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/config.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/config.h +@@ -0,0 +1,760 @@ ++/* Automatically generated by configure - do not modify! */ ++#ifndef FFMPEG_CONFIG_H ++#define FFMPEG_CONFIG_H ++/* #define FFMPEG_CONFIGURATION "--disable-everything --disable-all --disable-doc --disable-htmlpages --disable-manpages --disable-podpages --disable-txtpages --disable-static --enable-avcodec --enable-avformat --enable-avutil --enable-static --enable-libopus --disable-debug --disable-bzlib --disable-error-resilience --disable-iconv --disable-network --disable-schannel --disable-sdl2 --disable-symver --disable-xlib --disable-zlib --disable-securetransport --disable-faan --disable-alsa --disable-autodetect --enable-decoder='vorbis,libopus,flac' --enable-decoder='pcm_u8,pcm_s16le,pcm_s24le,pcm_s32le,pcm_f32le,mp3' --enable-decoder='pcm_s16be,pcm_s24be,pcm_mulaw,pcm_alaw' --enable-demuxer='ogg,matroska,wav,flac,mp3,mov' --enable-parser='opus,vorbis,flac,mpegaudio,vp9' --extra-cflags=-I/CHROMIUM_REBUILD/CHROMIUM_120/NEW/chromium-120.0.6099.62/third_party/opus/src/include --disable-linux-perf --x86asmexe=nasm --optflags='\"-O2\"' --enable-decoder='theora,vp8' --enable-parser='vp3,vp8' --arch=ppc64le --extra-cflags='-mcpu=power8' --enable-pic --cc=clang --cxx=clang++ --ld=clang --extra-ldflags='-fuse-ld=lld' --enable-decoder='aac,h264' --enable-demuxer=aac --enable-parser='aac,h264'" -- elide long configuration string from binary */ ++#define FFMPEG_LICENSE "LGPL version 2.1 or later" ++#define CONFIG_THIS_YEAR 2023 ++#define FFMPEG_DATADIR "/usr/local/share/ffmpeg" ++#define AVCONV_DATADIR "/usr/local/share/ffmpeg" ++#define CC_IDENT "Debian clang version 16.0.6 (19)" ++#define OS_NAME linux 
++#define av_restrict restrict ++#define EXTERN_PREFIX "" ++#define EXTERN_ASM ++#define BUILDSUF "" ++#define SLIBSUF ".so" ++#define HAVE_MMX2 HAVE_MMXEXT ++#define SWS_MAX_FILTER_SIZE 256 ++#define ARCH_AARCH64 0 ++#define ARCH_ALPHA 0 ++#define ARCH_ARM 0 ++#define ARCH_AVR32 0 ++#define ARCH_AVR32_AP 0 ++#define ARCH_AVR32_UC 0 ++#define ARCH_BFIN 0 ++#define ARCH_IA64 0 ++#define ARCH_LOONGARCH 0 ++#define ARCH_LOONGARCH32 0 ++#define ARCH_LOONGARCH64 0 ++#define ARCH_M68K 0 ++#define ARCH_MIPS 0 ++#define ARCH_MIPS64 0 ++#define ARCH_PARISC 0 ++#define ARCH_PPC 1 ++#define ARCH_PPC64 1 ++#define ARCH_RISCV 0 ++#define ARCH_S390 0 ++#define ARCH_SH4 0 ++#define ARCH_SPARC 0 ++#define ARCH_SPARC64 0 ++#define ARCH_TILEGX 0 ++#define ARCH_TILEPRO 0 ++#define ARCH_X86 0 ++#define ARCH_X86_32 0 ++#define ARCH_X86_64 0 ++#define HAVE_ARMV5TE 0 ++#define HAVE_ARMV6 0 ++#define HAVE_ARMV6T2 0 ++#define HAVE_ARMV8 0 ++#define HAVE_DOTPROD 0 ++#define HAVE_I8MM 0 ++#define HAVE_NEON 0 ++#define HAVE_VFP 0 ++#define HAVE_VFPV3 0 ++#define HAVE_SETEND 0 ++#define HAVE_ALTIVEC 1 ++#define HAVE_DCBZL 1 ++#define HAVE_LDBRX 0 ++#define HAVE_POWER8 1 ++#define HAVE_PPC4XX 0 ++#define HAVE_VSX 1 ++#define HAVE_RVV 0 ++#define HAVE_AESNI 0 ++#define HAVE_AMD3DNOW 0 ++#define HAVE_AMD3DNOWEXT 0 ++#define HAVE_AVX 0 ++#define HAVE_AVX2 0 ++#define HAVE_AVX512 0 ++#define HAVE_AVX512ICL 0 ++#define HAVE_FMA3 0 ++#define HAVE_FMA4 0 ++#define HAVE_MMX 0 ++#define HAVE_MMXEXT 0 ++#define HAVE_SSE 0 ++#define HAVE_SSE2 0 ++#define HAVE_SSE3 0 ++#define HAVE_SSE4 0 ++#define HAVE_SSE42 0 ++#define HAVE_SSSE3 0 ++#define HAVE_XOP 0 ++#define HAVE_CPUNOP 0 ++#define HAVE_I686 0 ++#define HAVE_MIPSFPU 0 ++#define HAVE_MIPS32R2 0 ++#define HAVE_MIPS32R5 0 ++#define HAVE_MIPS64R2 0 ++#define HAVE_MIPS32R6 0 ++#define HAVE_MIPS64R6 0 ++#define HAVE_MIPSDSP 0 ++#define HAVE_MIPSDSPR2 0 ++#define HAVE_MSA 0 ++#define HAVE_LOONGSON2 0 ++#define HAVE_LOONGSON3 0 ++#define HAVE_MMI 0 ++#define 
HAVE_LSX 0 ++#define HAVE_LASX 0 ++#define HAVE_ARMV5TE_EXTERNAL 0 ++#define HAVE_ARMV6_EXTERNAL 0 ++#define HAVE_ARMV6T2_EXTERNAL 0 ++#define HAVE_ARMV8_EXTERNAL 0 ++#define HAVE_DOTPROD_EXTERNAL 0 ++#define HAVE_I8MM_EXTERNAL 0 ++#define HAVE_NEON_EXTERNAL 0 ++#define HAVE_VFP_EXTERNAL 0 ++#define HAVE_VFPV3_EXTERNAL 0 ++#define HAVE_SETEND_EXTERNAL 0 ++#define HAVE_ALTIVEC_EXTERNAL 0 ++#define HAVE_DCBZL_EXTERNAL 0 ++#define HAVE_LDBRX_EXTERNAL 0 ++#define HAVE_POWER8_EXTERNAL 0 ++#define HAVE_PPC4XX_EXTERNAL 0 ++#define HAVE_VSX_EXTERNAL 0 ++#define HAVE_RVV_EXTERNAL 0 ++#define HAVE_AESNI_EXTERNAL 0 ++#define HAVE_AMD3DNOW_EXTERNAL 0 ++#define HAVE_AMD3DNOWEXT_EXTERNAL 0 ++#define HAVE_AVX_EXTERNAL 0 ++#define HAVE_AVX2_EXTERNAL 0 ++#define HAVE_AVX512_EXTERNAL 0 ++#define HAVE_AVX512ICL_EXTERNAL 0 ++#define HAVE_FMA3_EXTERNAL 0 ++#define HAVE_FMA4_EXTERNAL 0 ++#define HAVE_MMX_EXTERNAL 0 ++#define HAVE_MMXEXT_EXTERNAL 0 ++#define HAVE_SSE_EXTERNAL 0 ++#define HAVE_SSE2_EXTERNAL 0 ++#define HAVE_SSE3_EXTERNAL 0 ++#define HAVE_SSE4_EXTERNAL 0 ++#define HAVE_SSE42_EXTERNAL 0 ++#define HAVE_SSSE3_EXTERNAL 0 ++#define HAVE_XOP_EXTERNAL 0 ++#define HAVE_CPUNOP_EXTERNAL 0 ++#define HAVE_I686_EXTERNAL 0 ++#define HAVE_MIPSFPU_EXTERNAL 0 ++#define HAVE_MIPS32R2_EXTERNAL 0 ++#define HAVE_MIPS32R5_EXTERNAL 0 ++#define HAVE_MIPS64R2_EXTERNAL 0 ++#define HAVE_MIPS32R6_EXTERNAL 0 ++#define HAVE_MIPS64R6_EXTERNAL 0 ++#define HAVE_MIPSDSP_EXTERNAL 0 ++#define HAVE_MIPSDSPR2_EXTERNAL 0 ++#define HAVE_MSA_EXTERNAL 0 ++#define HAVE_LOONGSON2_EXTERNAL 0 ++#define HAVE_LOONGSON3_EXTERNAL 0 ++#define HAVE_MMI_EXTERNAL 0 ++#define HAVE_LSX_EXTERNAL 0 ++#define HAVE_LASX_EXTERNAL 0 ++#define HAVE_ARMV5TE_INLINE 0 ++#define HAVE_ARMV6_INLINE 0 ++#define HAVE_ARMV6T2_INLINE 0 ++#define HAVE_ARMV8_INLINE 0 ++#define HAVE_DOTPROD_INLINE 0 ++#define HAVE_I8MM_INLINE 0 ++#define HAVE_NEON_INLINE 0 ++#define HAVE_VFP_INLINE 0 ++#define HAVE_VFPV3_INLINE 0 ++#define HAVE_SETEND_INLINE 0 
++#define HAVE_ALTIVEC_INLINE 0 ++#define HAVE_DCBZL_INLINE 0 ++#define HAVE_LDBRX_INLINE 0 ++#define HAVE_POWER8_INLINE 0 ++#define HAVE_PPC4XX_INLINE 0 ++#define HAVE_VSX_INLINE 0 ++#define HAVE_RVV_INLINE 0 ++#define HAVE_AESNI_INLINE 0 ++#define HAVE_AMD3DNOW_INLINE 0 ++#define HAVE_AMD3DNOWEXT_INLINE 0 ++#define HAVE_AVX_INLINE 0 ++#define HAVE_AVX2_INLINE 0 ++#define HAVE_AVX512_INLINE 0 ++#define HAVE_AVX512ICL_INLINE 0 ++#define HAVE_FMA3_INLINE 0 ++#define HAVE_FMA4_INLINE 0 ++#define HAVE_MMX_INLINE 0 ++#define HAVE_MMXEXT_INLINE 0 ++#define HAVE_SSE_INLINE 0 ++#define HAVE_SSE2_INLINE 0 ++#define HAVE_SSE3_INLINE 0 ++#define HAVE_SSE4_INLINE 0 ++#define HAVE_SSE42_INLINE 0 ++#define HAVE_SSSE3_INLINE 0 ++#define HAVE_XOP_INLINE 0 ++#define HAVE_CPUNOP_INLINE 0 ++#define HAVE_I686_INLINE 0 ++#define HAVE_MIPSFPU_INLINE 0 ++#define HAVE_MIPS32R2_INLINE 0 ++#define HAVE_MIPS32R5_INLINE 0 ++#define HAVE_MIPS64R2_INLINE 0 ++#define HAVE_MIPS32R6_INLINE 0 ++#define HAVE_MIPS64R6_INLINE 0 ++#define HAVE_MIPSDSP_INLINE 0 ++#define HAVE_MIPSDSPR2_INLINE 0 ++#define HAVE_MSA_INLINE 0 ++#define HAVE_LOONGSON2_INLINE 0 ++#define HAVE_LOONGSON3_INLINE 0 ++#define HAVE_MMI_INLINE 0 ++#define HAVE_LSX_INLINE 0 ++#define HAVE_LASX_INLINE 0 ++#define HAVE_ALIGNED_STACK 1 ++#define HAVE_FAST_64BIT 1 ++#define HAVE_FAST_CLZ 1 ++#define HAVE_FAST_CMOV 0 ++#define HAVE_FAST_FLOAT16 0 ++#define HAVE_LOCAL_ALIGNED 1 ++#define HAVE_SIMD_ALIGN_16 1 ++#define HAVE_SIMD_ALIGN_32 0 ++#define HAVE_SIMD_ALIGN_64 0 ++#define HAVE_ATOMIC_CAS_PTR 0 ++#define HAVE_MACHINE_RW_BARRIER 0 ++#define HAVE_MEMORYBARRIER 0 ++#define HAVE_MM_EMPTY 0 ++#define HAVE_RDTSC 0 ++#define HAVE_SEM_TIMEDWAIT 1 ++#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1 ++#define HAVE_INLINE_ASM 1 ++#define HAVE_SYMVER 0 ++#define HAVE_X86ASM 0 ++#define HAVE_BIGENDIAN 0 ++#define HAVE_FAST_UNALIGNED 1 ++#define HAVE_ARPA_INET_H 0 ++#define HAVE_ASM_HWCAP_H 0 ++#define HAVE_ASM_TYPES_H 1 ++#define HAVE_CDIO_PARANOIA_H 0 
++#define HAVE_CDIO_PARANOIA_PARANOIA_H 0 ++#define HAVE_CUDA_H 0 ++#define HAVE_DISPATCH_DISPATCH_H 0 ++#define HAVE_DEV_BKTR_IOCTL_BT848_H 0 ++#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0 ++#define HAVE_DEV_IC_BT8XX_H 0 ++#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0 ++#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0 ++#define HAVE_DIRECT_H 0 ++#define HAVE_DIRENT_H 1 ++#define HAVE_DXGIDEBUG_H 0 ++#define HAVE_DXVA_H 0 ++#define HAVE_ES2_GL_H 0 ++#define HAVE_GSM_H 0 ++#define HAVE_IO_H 0 ++#define HAVE_LINUX_DMA_BUF_H 0 ++#define HAVE_LINUX_PERF_EVENT_H 1 ++#define HAVE_MACHINE_IOCTL_BT848_H 0 ++#define HAVE_MACHINE_IOCTL_METEOR_H 0 ++#define HAVE_MALLOC_H 1 ++#define HAVE_OPENCV2_CORE_CORE_C_H 0 ++#define HAVE_OPENGL_GL3_H 0 ++#define HAVE_POLL_H 1 ++#define HAVE_SYS_PARAM_H 1 ++#define HAVE_SYS_RESOURCE_H 1 ++#define HAVE_SYS_SELECT_H 1 ++#define HAVE_SYS_SOUNDCARD_H 1 ++#define HAVE_SYS_TIME_H 1 ++#define HAVE_SYS_UN_H 1 ++#define HAVE_SYS_VIDEOIO_H 0 ++#define HAVE_TERMIOS_H 1 ++#define HAVE_UDPLITE_H 0 ++#define HAVE_UNISTD_H 1 ++#define HAVE_VALGRIND_VALGRIND_H 0 /* #define HAVE_VALGRIND_VALGRIND_H 1 -- forced to 0. 
See https://crbug.com/590440 */ ++#define HAVE_WINDOWS_H 0 ++#define HAVE_WINSOCK2_H 0 ++#define HAVE_INTRINSICS_NEON 0 ++#define HAVE_ATANF 1 ++#define HAVE_ATAN2F 1 ++#define HAVE_CBRT 1 ++#define HAVE_CBRTF 1 ++#define HAVE_COPYSIGN 1 ++#define HAVE_COSF 1 ++#define HAVE_ERF 1 ++#define HAVE_EXP2 1 ++#define HAVE_EXP2F 1 ++#define HAVE_EXPF 1 ++#define HAVE_HYPOT 1 ++#define HAVE_ISFINITE 1 ++#define HAVE_ISINF 1 ++#define HAVE_ISNAN 1 ++#define HAVE_LDEXPF 1 ++#define HAVE_LLRINT 1 ++#define HAVE_LLRINTF 1 ++#define HAVE_LOG2 1 ++#define HAVE_LOG2F 1 ++#define HAVE_LOG10F 1 ++#define HAVE_LRINT 1 ++#define HAVE_LRINTF 1 ++#define HAVE_POWF 1 ++#define HAVE_RINT 1 ++#define HAVE_ROUND 1 ++#define HAVE_ROUNDF 1 ++#define HAVE_SINF 1 ++#define HAVE_TRUNC 1 ++#define HAVE_TRUNCF 1 ++#define HAVE_DOS_PATHS 0 ++#define HAVE_LIBC_MSVCRT 0 ++#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0 ++#define HAVE_SECTION_DATA_REL_RO 1 ++#define HAVE_THREADS 1 ++#define HAVE_UWP 0 ++#define HAVE_WINRT 0 ++#define HAVE_ACCESS 1 ++#define HAVE_ALIGNED_MALLOC 0 ++#define HAVE_ARC4RANDOM_BUF 0 ++#define HAVE_CLOCK_GETTIME 1 ++#define HAVE_CLOSESOCKET 0 ++#define HAVE_COMMANDLINETOARGVW 0 ++#define HAVE_FCNTL 1 ++#define HAVE_GETADDRINFO 0 ++#define HAVE_GETAUXVAL 1 ++#define HAVE_GETENV 1 ++#define HAVE_GETHRTIME 0 ++#define HAVE_GETOPT 1 ++#define HAVE_GETMODULEHANDLE 0 ++#define HAVE_GETPROCESSAFFINITYMASK 0 ++#define HAVE_GETPROCESSMEMORYINFO 0 ++#define HAVE_GETPROCESSTIMES 0 ++#define HAVE_GETRUSAGE 1 ++#define HAVE_GETSTDHANDLE 0 ++#define HAVE_GETSYSTEMTIMEASFILETIME 0 ++#define HAVE_GETTIMEOFDAY 1 ++#define HAVE_GLOB 1 ++#define HAVE_GLXGETPROCADDRESS 0 ++#define HAVE_GMTIME_R 1 ++#define HAVE_INET_ATON 0 ++#define HAVE_ISATTY 1 ++#define HAVE_KBHIT 0 ++#define HAVE_LOCALTIME_R 1 ++#define HAVE_LSTAT 1 ++#define HAVE_LZO1X_999_COMPRESS 0 ++#define HAVE_MACH_ABSOLUTE_TIME 0 ++#define HAVE_MAPVIEWOFFILE 0 ++#define HAVE_MEMALIGN 1 ++#define HAVE_MKSTEMP 1 ++#define 
HAVE_MMAP 1 ++#define HAVE_MPROTECT 1 ++#define HAVE_NANOSLEEP 1 ++#define HAVE_PEEKNAMEDPIPE 0 ++#define HAVE_POSIX_MEMALIGN 1 ++#define HAVE_PRCTL 0 /* #define HAVE_PRCTL 1 -- forced to 0 for Fuchsia */ ++#define HAVE_PTHREAD_CANCEL 1 ++#define HAVE_SCHED_GETAFFINITY 1 ++#define HAVE_SECITEMIMPORT 0 ++#define HAVE_SETCONSOLETEXTATTRIBUTE 0 ++#define HAVE_SETCONSOLECTRLHANDLER 0 ++#define HAVE_SETDLLDIRECTORY 0 ++#define HAVE_SETMODE 0 ++#define HAVE_SETRLIMIT 1 ++#define HAVE_SLEEP 0 ++#define HAVE_STRERROR_R 1 ++#define HAVE_SYSCONF 1 ++#define HAVE_SYSCTL 0 /* #define HAVE_SYSCTL 0 -- forced to 0 for Fuchsia */ ++#define HAVE_SYSCTLBYNAME 0 ++#define HAVE_USLEEP 1 ++#define HAVE_UTGETOSTYPEFROMSTRING 0 ++#define HAVE_VIRTUALALLOC 0 ++#define HAVE_WGLGETPROCADDRESS 0 ++#define HAVE_BCRYPT 0 ++#define HAVE_VAAPI_DRM 0 ++#define HAVE_VAAPI_X11 0 ++#define HAVE_VAAPI_WIN32 0 ++#define HAVE_VDPAU_X11 0 ++#define HAVE_PTHREADS 1 ++#define HAVE_OS2THREADS 0 ++#define HAVE_W32THREADS 0 ++#define HAVE_AS_ARCH_DIRECTIVE 0 ++#define HAVE_AS_ARCHEXT_DOTPROD_DIRECTIVE 0 ++#define HAVE_AS_ARCHEXT_I8MM_DIRECTIVE 0 ++#define HAVE_AS_DN_DIRECTIVE 0 ++#define HAVE_AS_FPU_DIRECTIVE 0 ++#define HAVE_AS_FUNC 0 ++#define HAVE_AS_OBJECT_ARCH 0 ++#define HAVE_ASM_MOD_Q 0 ++#define HAVE_BLOCKS_EXTENSION 0 ++#define HAVE_EBP_AVAILABLE 0 ++#define HAVE_EBX_AVAILABLE 0 ++#define HAVE_GNU_AS 0 ++#define HAVE_GNU_WINDRES 0 ++#define HAVE_IBM_ASM 1 ++#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0 ++#define HAVE_INLINE_ASM_LABELS 1 ++#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1 ++#define HAVE_PRAGMA_DEPRECATED 1 ++#define HAVE_RSYNC_CONTIMEOUT 1 ++#define HAVE_SYMVER_ASM_LABEL 1 ++#define HAVE_SYMVER_GNU_ASM 1 ++#define HAVE_VFP_ARGS 0 ++#define HAVE_XFORM_ASM 1 ++#define HAVE_XMM_CLOBBERS 0 ++#define HAVE_DPI_AWARENESS_CONTEXT 0 ++#define HAVE_IDXGIOUTPUT5 0 ++#define HAVE_KCMVIDEOCODECTYPE_HEVC 0 ++#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0 ++#define HAVE_KCMVIDEOCODECTYPE_VP9 0 ++#define 
HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0 ++#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0 ++#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0 ++#define HAVE_SOCKLEN_T 0 ++#define HAVE_STRUCT_ADDRINFO 0 ++#define HAVE_STRUCT_GROUP_SOURCE_REQ 0 ++#define HAVE_STRUCT_IP_MREQ_SOURCE 0 ++#define HAVE_STRUCT_IPV6_MREQ 0 ++#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 0 ++#define HAVE_STRUCT_POLLFD 0 ++#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1 ++#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0 ++#define HAVE_STRUCT_SOCKADDR_IN6 0 ++#define HAVE_STRUCT_SOCKADDR_SA_LEN 0 ++#define HAVE_STRUCT_SOCKADDR_STORAGE 0 ++#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1 ++#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0 ++#define HAVE_GZIP 1 ++#define HAVE_LIBDRM_GETFB2 0 ++#define HAVE_MAKEINFO 0 ++#define HAVE_MAKEINFO_HTML 0 ++#define HAVE_OPENCL_D3D11 0 ++#define HAVE_OPENCL_DRM_ARM 0 ++#define HAVE_OPENCL_DRM_BEIGNET 0 ++#define HAVE_OPENCL_DXVA2 0 ++#define HAVE_OPENCL_VAAPI_BEIGNET 0 ++#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0 ++#define HAVE_PERL 1 ++#define HAVE_POD2MAN 1 ++#define HAVE_TEXI2HTML 0 ++#define HAVE_XMLLINT 1 ++#define HAVE_ZLIB_GZIP 0 ++#define HAVE_OPENVINO2 0 ++#define CONFIG_DOC 0 ++#define CONFIG_HTMLPAGES 0 ++#define CONFIG_MANPAGES 0 ++#define CONFIG_PODPAGES 0 ++#define 
CONFIG_TXTPAGES 0 ++#define CONFIG_AVIO_HTTP_SERVE_FILES_EXAMPLE 1 ++#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1 ++#define CONFIG_AVIO_READ_CALLBACK_EXAMPLE 1 ++#define CONFIG_DECODE_AUDIO_EXAMPLE 1 ++#define CONFIG_DECODE_FILTER_AUDIO_EXAMPLE 0 ++#define CONFIG_DECODE_FILTER_VIDEO_EXAMPLE 0 ++#define CONFIG_DECODE_VIDEO_EXAMPLE 1 ++#define CONFIG_DEMUX_DECODE_EXAMPLE 1 ++#define CONFIG_ENCODE_AUDIO_EXAMPLE 1 ++#define CONFIG_ENCODE_VIDEO_EXAMPLE 1 ++#define CONFIG_EXTRACT_MVS_EXAMPLE 1 ++#define CONFIG_FILTER_AUDIO_EXAMPLE 0 ++#define CONFIG_HW_DECODE_EXAMPLE 1 ++#define CONFIG_MUX_EXAMPLE 0 ++#define CONFIG_QSV_DECODE_EXAMPLE 0 ++#define CONFIG_REMUX_EXAMPLE 1 ++#define CONFIG_RESAMPLE_AUDIO_EXAMPLE 0 ++#define CONFIG_SCALE_VIDEO_EXAMPLE 0 ++#define CONFIG_SHOW_METADATA_EXAMPLE 1 ++#define CONFIG_TRANSCODE_AAC_EXAMPLE 0 ++#define CONFIG_TRANSCODE_EXAMPLE 0 ++#define CONFIG_VAAPI_ENCODE_EXAMPLE 0 ++#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0 ++#define CONFIG_QSV_TRANSCODE_EXAMPLE 0 ++#define CONFIG_AVISYNTH 0 ++#define CONFIG_FREI0R 0 ++#define CONFIG_LIBCDIO 0 ++#define CONFIG_LIBDAVS2 0 ++#define CONFIG_LIBRUBBERBAND 0 ++#define CONFIG_LIBVIDSTAB 0 ++#define CONFIG_LIBX264 0 ++#define CONFIG_LIBX265 0 ++#define CONFIG_LIBXAVS 0 ++#define CONFIG_LIBXAVS2 0 ++#define CONFIG_LIBXVID 0 ++#define CONFIG_DECKLINK 0 ++#define CONFIG_LIBFDK_AAC 0 ++#define CONFIG_LIBTLS 0 ++#define CONFIG_GMP 0 ++#define CONFIG_LIBARIBB24 0 ++#define CONFIG_LIBLENSFUN 0 ++#define CONFIG_LIBOPENCORE_AMRNB 0 ++#define CONFIG_LIBOPENCORE_AMRWB 0 ++#define CONFIG_LIBVO_AMRWBENC 0 ++#define CONFIG_MBEDTLS 0 ++#define CONFIG_RKMPP 0 ++#define CONFIG_LIBSMBCLIENT 0 ++#define CONFIG_CHROMAPRINT 0 ++#define CONFIG_GCRYPT 0 ++#define CONFIG_GNUTLS 0 ++#define CONFIG_JNI 0 ++#define CONFIG_LADSPA 0 ++#define CONFIG_LCMS2 0 ++#define CONFIG_LIBAOM 0 ++#define CONFIG_LIBARIBCAPTION 0 ++#define CONFIG_LIBASS 0 ++#define CONFIG_LIBBLURAY 0 ++#define CONFIG_LIBBS2B 0 ++#define CONFIG_LIBCACA 0 ++#define 
CONFIG_LIBCELT 0 ++#define CONFIG_LIBCODEC2 0 ++#define CONFIG_LIBDAV1D 0 ++#define CONFIG_LIBDC1394 0 ++#define CONFIG_LIBDRM 0 ++#define CONFIG_LIBFLITE 0 ++#define CONFIG_LIBFONTCONFIG 0 ++#define CONFIG_LIBFREETYPE 0 ++#define CONFIG_LIBFRIBIDI 0 ++#define CONFIG_LIBHARFBUZZ 0 ++#define CONFIG_LIBGLSLANG 0 ++#define CONFIG_LIBGME 0 ++#define CONFIG_LIBGSM 0 ++#define CONFIG_LIBIEC61883 0 ++#define CONFIG_LIBILBC 0 ++#define CONFIG_LIBJACK 0 ++#define CONFIG_LIBJXL 0 ++#define CONFIG_LIBKLVANC 0 ++#define CONFIG_LIBKVAZAAR 0 ++#define CONFIG_LIBMODPLUG 0 ++#define CONFIG_LIBMP3LAME 0 ++#define CONFIG_LIBMYSOFA 0 ++#define CONFIG_LIBOPENCV 0 ++#define CONFIG_LIBOPENH264 0 ++#define CONFIG_LIBOPENJPEG 0 ++#define CONFIG_LIBOPENMPT 0 ++#define CONFIG_LIBOPENVINO 0 ++#define CONFIG_LIBOPUS 1 ++#define CONFIG_LIBPLACEBO 0 ++#define CONFIG_LIBPULSE 0 ++#define CONFIG_LIBRABBITMQ 0 ++#define CONFIG_LIBRAV1E 0 ++#define CONFIG_LIBRIST 0 ++#define CONFIG_LIBRSVG 0 ++#define CONFIG_LIBRTMP 0 ++#define CONFIG_LIBSHADERC 0 ++#define CONFIG_LIBSHINE 0 ++#define CONFIG_LIBSMBCLIENT 0 ++#define CONFIG_LIBSNAPPY 0 ++#define CONFIG_LIBSOXR 0 ++#define CONFIG_LIBSPEEX 0 ++#define CONFIG_LIBSRT 0 ++#define CONFIG_LIBSSH 0 ++#define CONFIG_LIBSVTAV1 0 ++#define CONFIG_LIBTENSORFLOW 0 ++#define CONFIG_LIBTESSERACT 0 ++#define CONFIG_LIBTHEORA 0 ++#define CONFIG_LIBTWOLAME 0 ++#define CONFIG_LIBUAVS3D 0 ++#define CONFIG_LIBV4L2 0 ++#define CONFIG_LIBVMAF 0 ++#define CONFIG_LIBVORBIS 0 ++#define CONFIG_LIBVPX 0 ++#define CONFIG_LIBWEBP 0 ++#define CONFIG_LIBXML2 0 ++#define CONFIG_LIBZIMG 0 ++#define CONFIG_LIBZMQ 0 ++#define CONFIG_LIBZVBI 0 ++#define CONFIG_LV2 0 ++#define CONFIG_MEDIACODEC 0 ++#define CONFIG_OPENAL 0 ++#define CONFIG_OPENGL 0 ++#define CONFIG_OPENSSL 0 ++#define CONFIG_POCKETSPHINX 0 ++#define CONFIG_VAPOURSYNTH 0 ++#define CONFIG_ALSA 0 ++#define CONFIG_APPKIT 0 ++#define CONFIG_AVFOUNDATION 0 ++#define CONFIG_BZLIB 0 ++#define CONFIG_COREIMAGE 0 ++#define 
CONFIG_ICONV 0 ++#define CONFIG_LIBXCB 0 ++#define CONFIG_LIBXCB_SHM 0 ++#define CONFIG_LIBXCB_SHAPE 0 ++#define CONFIG_LIBXCB_XFIXES 0 ++#define CONFIG_LZMA 0 ++#define CONFIG_MEDIAFOUNDATION 0 ++#define CONFIG_METAL 0 ++#define CONFIG_SCHANNEL 0 ++#define CONFIG_SDL2 0 ++#define CONFIG_SECURETRANSPORT 0 ++#define CONFIG_SNDIO 0 ++#define CONFIG_XLIB 0 ++#define CONFIG_ZLIB 0 ++#define CONFIG_CUDA_NVCC 0 ++#define CONFIG_CUDA_SDK 0 ++#define CONFIG_LIBNPP 0 ++#define CONFIG_LIBMFX 0 ++#define CONFIG_LIBVPL 0 ++#define CONFIG_MMAL 0 ++#define CONFIG_OMX 0 ++#define CONFIG_OPENCL 0 ++#define CONFIG_AMF 0 ++#define CONFIG_AUDIOTOOLBOX 0 ++#define CONFIG_CRYSTALHD 0 ++#define CONFIG_CUDA 0 ++#define CONFIG_CUDA_LLVM 0 ++#define CONFIG_CUVID 0 ++#define CONFIG_D3D11VA 0 ++#define CONFIG_DXVA2 0 ++#define CONFIG_FFNVCODEC 0 ++#define CONFIG_NVDEC 0 ++#define CONFIG_NVENC 0 ++#define CONFIG_VAAPI 0 ++#define CONFIG_VDPAU 0 ++#define CONFIG_VIDEOTOOLBOX 0 ++#define CONFIG_VULKAN 0 ++#define CONFIG_V4L2_M2M 0 ++#define CONFIG_FTRAPV 0 ++#define CONFIG_GRAY 0 ++#define CONFIG_HARDCODED_TABLES 0 ++#define CONFIG_OMX_RPI 0 ++#define CONFIG_RUNTIME_CPUDETECT 1 ++#define CONFIG_SAFE_BITSTREAM_READER 1 ++#define CONFIG_SHARED 0 ++#define CONFIG_SMALL 0 ++#define CONFIG_STATIC 1 ++#define CONFIG_SWSCALE_ALPHA 1 ++#define CONFIG_GPL 0 ++#define CONFIG_NONFREE 0 ++#define CONFIG_VERSION3 0 ++#define CONFIG_AVDEVICE 0 ++#define CONFIG_AVFILTER 0 ++#define CONFIG_SWSCALE 0 ++#define CONFIG_POSTPROC 0 ++#define CONFIG_AVFORMAT 1 ++#define CONFIG_AVCODEC 1 ++#define CONFIG_SWRESAMPLE 0 ++#define CONFIG_AVUTIL 1 ++#define CONFIG_FFPLAY 0 ++#define CONFIG_FFPROBE 0 ++#define CONFIG_FFMPEG 0 ++#define CONFIG_DWT 0 ++#define CONFIG_ERROR_RESILIENCE 0 ++#define CONFIG_FAAN 0 ++#define CONFIG_FAST_UNALIGNED 1 ++#define CONFIG_LSP 0 ++#define CONFIG_PIXELUTILS 0 ++#define CONFIG_NETWORK 0 ++#define CONFIG_AUTODETECT 0 ++#define CONFIG_FONTCONFIG 0 ++#define CONFIG_LARGE_TESTS 1 ++#define 
CONFIG_LINUX_PERF 0 ++#define CONFIG_MACOS_KPERF 0 ++#define CONFIG_MEMORY_POISONING 0 ++#define CONFIG_NEON_CLOBBER_TEST 0 ++#define CONFIG_OSSFUZZ 0 ++#define CONFIG_PIC 1 ++#define CONFIG_PTX_COMPRESSION 0 ++#define CONFIG_THUMB 0 ++#define CONFIG_VALGRIND_BACKTRACE 0 ++#define CONFIG_XMM_CLOBBER_TEST 0 ++#define CONFIG_BSFS 0 ++#define CONFIG_DECODERS 1 ++#define CONFIG_ENCODERS 0 ++#define CONFIG_HWACCELS 0 ++#define CONFIG_PARSERS 1 ++#define CONFIG_INDEVS 0 ++#define CONFIG_OUTDEVS 0 ++#define CONFIG_FILTERS 0 ++#define CONFIG_DEMUXERS 1 ++#define CONFIG_MUXERS 0 ++#define CONFIG_PROTOCOLS 0 ++#define CONFIG_AANDCTTABLES 0 ++#define CONFIG_AC3DSP 0 ++#define CONFIG_ADTS_HEADER 1 ++#define CONFIG_ATSC_A53 1 ++#define CONFIG_AUDIO_FRAME_QUEUE 0 ++#define CONFIG_AUDIODSP 0 ++#define CONFIG_BLOCKDSP 0 ++#define CONFIG_BSWAPDSP 0 ++#define CONFIG_CABAC 1 ++#define CONFIG_CBS 0 ++#define CONFIG_CBS_AV1 0 ++#define CONFIG_CBS_H264 0 ++#define CONFIG_CBS_H265 0 ++#define CONFIG_CBS_H266 0 ++#define CONFIG_CBS_JPEG 0 ++#define CONFIG_CBS_MPEG2 0 ++#define CONFIG_CBS_VP9 0 ++#define CONFIG_DEFLATE_WRAPPER 0 ++#define CONFIG_DIRAC_PARSE 1 ++#define CONFIG_DNN 0 ++#define CONFIG_DOVI_RPU 0 ++#define CONFIG_DVPROFILE 0 ++#define CONFIG_EVCPARSE 0 ++#define CONFIG_EXIF 0 ++#define CONFIG_FAANDCT 0 ++#define CONFIG_FAANIDCT 0 ++#define CONFIG_FDCTDSP 0 ++#define CONFIG_FMTCONVERT 0 ++#define CONFIG_FRAME_THREAD_ENCODER 0 ++#define CONFIG_G722DSP 0 ++#define CONFIG_GOLOMB 1 ++#define CONFIG_GPLV3 0 ++#define CONFIG_H263DSP 0 ++#define CONFIG_H264CHROMA 1 ++#define CONFIG_H264DSP 1 ++#define CONFIG_H264PARSE 1 ++#define CONFIG_H264PRED 1 ++#define CONFIG_H264QPEL 1 ++#define CONFIG_H264_SEI 1 ++#define CONFIG_HEVCPARSE 0 ++#define CONFIG_HEVC_SEI 0 ++#define CONFIG_HPELDSP 1 ++#define CONFIG_HUFFMAN 0 ++#define CONFIG_HUFFYUVDSP 0 ++#define CONFIG_HUFFYUVENCDSP 0 ++#define CONFIG_IDCTDSP 0 ++#define CONFIG_IIRFILTER 0 ++#define CONFIG_INFLATE_WRAPPER 0 ++#define 
CONFIG_INTRAX8 0 ++#define CONFIG_ISO_MEDIA 1 ++#define CONFIG_IVIDSP 0 ++#define CONFIG_JPEGTABLES 0 ++#define CONFIG_LGPLV3 0 ++#define CONFIG_LIBX262 0 ++#define CONFIG_LLAUDDSP 0 ++#define CONFIG_LLVIDDSP 0 ++#define CONFIG_LLVIDENCDSP 0 ++#define CONFIG_LPC 0 ++#define CONFIG_LZF 0 ++#define CONFIG_ME_CMP 0 ++#define CONFIG_MPEG_ER 0 ++#define CONFIG_MPEGAUDIO 1 ++#define CONFIG_MPEGAUDIODSP 1 ++#define CONFIG_MPEGAUDIOHEADER 1 ++#define CONFIG_MPEG4AUDIO 1 ++#define CONFIG_MPEGVIDEO 0 ++#define CONFIG_MPEGVIDEODEC 0 ++#define CONFIG_MPEGVIDEOENC 0 ++#define CONFIG_MSMPEG4DEC 0 ++#define CONFIG_MSMPEG4ENC 0 ++#define CONFIG_MSS34DSP 0 ++#define CONFIG_PIXBLOCKDSP 0 ++#define CONFIG_QPELDSP 0 ++#define CONFIG_QSV 0 ++#define CONFIG_QSVDEC 0 ++#define CONFIG_QSVENC 0 ++#define CONFIG_QSVVPP 0 ++#define CONFIG_RANGECODER 0 ++#define CONFIG_RIFFDEC 1 ++#define CONFIG_RIFFENC 0 ++#define CONFIG_RTPDEC 0 ++#define CONFIG_RTPENC_CHAIN 0 ++#define CONFIG_RV34DSP 0 ++#define CONFIG_SCENE_SAD 0 ++#define CONFIG_SINEWIN 1 ++#define CONFIG_SNAPPY 0 ++#define CONFIG_SRTP 0 ++#define CONFIG_STARTCODE 1 ++#define CONFIG_TEXTUREDSP 0 ++#define CONFIG_TEXTUREDSPENC 0 ++#define CONFIG_TPELDSP 0 ++#define CONFIG_VAAPI_1 0 ++#define CONFIG_VAAPI_ENCODE 0 ++#define CONFIG_VC1DSP 0 ++#define CONFIG_VIDEODSP 1 ++#define CONFIG_VP3DSP 1 ++#define CONFIG_VP56DSP 0 ++#define CONFIG_VP8DSP 1 ++#define CONFIG_WMA_FREQS 0 ++#define CONFIG_WMV2DSP 0 ++#endif /* FFMPEG_CONFIG_H */ +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/config_components.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/config_components.h +@@ -0,0 +1,2196 @@ ++/* Automatically generated by configure - do not modify! 
*/ ++#ifndef FFMPEG_CONFIG_COMPONENTS_H ++#define FFMPEG_CONFIG_COMPONENTS_H ++#define CONFIG_AAC_ADTSTOASC_BSF 0 ++#define CONFIG_AV1_FRAME_MERGE_BSF 0 ++#define CONFIG_AV1_FRAME_SPLIT_BSF 0 ++#define CONFIG_AV1_METADATA_BSF 0 ++#define CONFIG_CHOMP_BSF 0 ++#define CONFIG_DUMP_EXTRADATA_BSF 0 ++#define CONFIG_DCA_CORE_BSF 0 ++#define CONFIG_DTS2PTS_BSF 0 ++#define CONFIG_DV_ERROR_MARKER_BSF 0 ++#define CONFIG_EAC3_CORE_BSF 0 ++#define CONFIG_EXTRACT_EXTRADATA_BSF 0 ++#define CONFIG_FILTER_UNITS_BSF 0 ++#define CONFIG_H264_METADATA_BSF 0 ++#define CONFIG_H264_MP4TOANNEXB_BSF 0 ++#define CONFIG_H264_REDUNDANT_PPS_BSF 0 ++#define CONFIG_HAPQA_EXTRACT_BSF 0 ++#define CONFIG_HEVC_METADATA_BSF 0 ++#define CONFIG_HEVC_MP4TOANNEXB_BSF 0 ++#define CONFIG_IMX_DUMP_HEADER_BSF 0 ++#define CONFIG_MEDIA100_TO_MJPEGB_BSF 0 ++#define CONFIG_MJPEG2JPEG_BSF 0 ++#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0 ++#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0 ++#define CONFIG_MPEG2_METADATA_BSF 0 ++#define CONFIG_MPEG4_UNPACK_BFRAMES_BSF 0 ++#define CONFIG_MOV2TEXTSUB_BSF 0 ++#define CONFIG_NOISE_BSF 0 ++#define CONFIG_NULL_BSF 0 ++#define CONFIG_OPUS_METADATA_BSF 0 ++#define CONFIG_PCM_RECHUNK_BSF 0 ++#define CONFIG_PGS_FRAME_MERGE_BSF 0 ++#define CONFIG_PRORES_METADATA_BSF 0 ++#define CONFIG_REMOVE_EXTRADATA_BSF 0 ++#define CONFIG_SETTS_BSF 0 ++#define CONFIG_TEXT2MOVSUB_BSF 0 ++#define CONFIG_TRACE_HEADERS_BSF 0 ++#define CONFIG_TRUEHD_CORE_BSF 0 ++#define CONFIG_VP9_METADATA_BSF 0 ++#define CONFIG_VP9_RAW_REORDER_BSF 0 ++#define CONFIG_VP9_SUPERFRAME_BSF 0 ++#define CONFIG_VP9_SUPERFRAME_SPLIT_BSF 0 ++#define CONFIG_VVC_METADATA_BSF 0 ++#define CONFIG_VVC_MP4TOANNEXB_BSF 0 ++#define CONFIG_EVC_FRAME_MERGE_BSF 0 ++#define CONFIG_AASC_DECODER 0 ++#define CONFIG_AIC_DECODER 0 ++#define CONFIG_ALIAS_PIX_DECODER 0 ++#define CONFIG_AGM_DECODER 0 ++#define CONFIG_AMV_DECODER 0 ++#define CONFIG_ANM_DECODER 0 ++#define CONFIG_ANSI_DECODER 0 ++#define CONFIG_APNG_DECODER 0 ++#define CONFIG_ARBC_DECODER 
0 ++#define CONFIG_ARGO_DECODER 0 ++#define CONFIG_ASV1_DECODER 0 ++#define CONFIG_ASV2_DECODER 0 ++#define CONFIG_AURA_DECODER 0 ++#define CONFIG_AURA2_DECODER 0 ++#define CONFIG_AVRP_DECODER 0 ++#define CONFIG_AVRN_DECODER 0 ++#define CONFIG_AVS_DECODER 0 ++#define CONFIG_AVUI_DECODER 0 ++#define CONFIG_AYUV_DECODER 0 ++#define CONFIG_BETHSOFTVID_DECODER 0 ++#define CONFIG_BFI_DECODER 0 ++#define CONFIG_BINK_DECODER 0 ++#define CONFIG_BITPACKED_DECODER 0 ++#define CONFIG_BMP_DECODER 0 ++#define CONFIG_BMV_VIDEO_DECODER 0 ++#define CONFIG_BRENDER_PIX_DECODER 0 ++#define CONFIG_C93_DECODER 0 ++#define CONFIG_CAVS_DECODER 0 ++#define CONFIG_CDGRAPHICS_DECODER 0 ++#define CONFIG_CDTOONS_DECODER 0 ++#define CONFIG_CDXL_DECODER 0 ++#define CONFIG_CFHD_DECODER 0 ++#define CONFIG_CINEPAK_DECODER 0 ++#define CONFIG_CLEARVIDEO_DECODER 0 ++#define CONFIG_CLJR_DECODER 0 ++#define CONFIG_CLLC_DECODER 0 ++#define CONFIG_COMFORTNOISE_DECODER 0 ++#define CONFIG_CPIA_DECODER 0 ++#define CONFIG_CRI_DECODER 0 ++#define CONFIG_CSCD_DECODER 0 ++#define CONFIG_CYUV_DECODER 0 ++#define CONFIG_DDS_DECODER 0 ++#define CONFIG_DFA_DECODER 0 ++#define CONFIG_DIRAC_DECODER 0 ++#define CONFIG_DNXHD_DECODER 0 ++#define CONFIG_DPX_DECODER 0 ++#define CONFIG_DSICINVIDEO_DECODER 0 ++#define CONFIG_DVAUDIO_DECODER 0 ++#define CONFIG_DVVIDEO_DECODER 0 ++#define CONFIG_DXA_DECODER 0 ++#define CONFIG_DXTORY_DECODER 0 ++#define CONFIG_DXV_DECODER 0 ++#define CONFIG_EACMV_DECODER 0 ++#define CONFIG_EAMAD_DECODER 0 ++#define CONFIG_EATGQ_DECODER 0 ++#define CONFIG_EATGV_DECODER 0 ++#define CONFIG_EATQI_DECODER 0 ++#define CONFIG_EIGHTBPS_DECODER 0 ++#define CONFIG_EIGHTSVX_EXP_DECODER 0 ++#define CONFIG_EIGHTSVX_FIB_DECODER 0 ++#define CONFIG_ESCAPE124_DECODER 0 ++#define CONFIG_ESCAPE130_DECODER 0 ++#define CONFIG_EXR_DECODER 0 ++#define CONFIG_FFV1_DECODER 0 ++#define CONFIG_FFVHUFF_DECODER 0 ++#define CONFIG_FIC_DECODER 0 ++#define CONFIG_FITS_DECODER 0 ++#define CONFIG_FLASHSV_DECODER 0 ++#define 
CONFIG_FLASHSV2_DECODER 0 ++#define CONFIG_FLIC_DECODER 0 ++#define CONFIG_FLV_DECODER 0 ++#define CONFIG_FMVC_DECODER 0 ++#define CONFIG_FOURXM_DECODER 0 ++#define CONFIG_FRAPS_DECODER 0 ++#define CONFIG_FRWU_DECODER 0 ++#define CONFIG_G2M_DECODER 0 ++#define CONFIG_GDV_DECODER 0 ++#define CONFIG_GEM_DECODER 0 ++#define CONFIG_GIF_DECODER 0 ++#define CONFIG_H261_DECODER 0 ++#define CONFIG_H263_DECODER 0 ++#define CONFIG_H263I_DECODER 0 ++#define CONFIG_H263P_DECODER 0 ++#define CONFIG_H263_V4L2M2M_DECODER 0 ++#define CONFIG_H264_DECODER 1 ++#define CONFIG_H264_CRYSTALHD_DECODER 0 ++#define CONFIG_H264_V4L2M2M_DECODER 0 ++#define CONFIG_H264_MEDIACODEC_DECODER 0 ++#define CONFIG_H264_MMAL_DECODER 0 ++#define CONFIG_H264_QSV_DECODER 0 ++#define CONFIG_H264_RKMPP_DECODER 0 ++#define CONFIG_HAP_DECODER 0 ++#define CONFIG_HEVC_DECODER 0 ++#define CONFIG_HEVC_QSV_DECODER 0 ++#define CONFIG_HEVC_RKMPP_DECODER 0 ++#define CONFIG_HEVC_V4L2M2M_DECODER 0 ++#define CONFIG_HNM4_VIDEO_DECODER 0 ++#define CONFIG_HQ_HQA_DECODER 0 ++#define CONFIG_HQX_DECODER 0 ++#define CONFIG_HUFFYUV_DECODER 0 ++#define CONFIG_HYMT_DECODER 0 ++#define CONFIG_IDCIN_DECODER 0 ++#define CONFIG_IFF_ILBM_DECODER 0 ++#define CONFIG_IMM4_DECODER 0 ++#define CONFIG_IMM5_DECODER 0 ++#define CONFIG_INDEO2_DECODER 0 ++#define CONFIG_INDEO3_DECODER 0 ++#define CONFIG_INDEO4_DECODER 0 ++#define CONFIG_INDEO5_DECODER 0 ++#define CONFIG_INTERPLAY_VIDEO_DECODER 0 ++#define CONFIG_IPU_DECODER 0 ++#define CONFIG_JPEG2000_DECODER 0 ++#define CONFIG_JPEGLS_DECODER 0 ++#define CONFIG_JV_DECODER 0 ++#define CONFIG_KGV1_DECODER 0 ++#define CONFIG_KMVC_DECODER 0 ++#define CONFIG_LAGARITH_DECODER 0 ++#define CONFIG_LOCO_DECODER 0 ++#define CONFIG_LSCR_DECODER 0 ++#define CONFIG_M101_DECODER 0 ++#define CONFIG_MAGICYUV_DECODER 0 ++#define CONFIG_MDEC_DECODER 0 ++#define CONFIG_MEDIA100_DECODER 0 ++#define CONFIG_MIMIC_DECODER 0 ++#define CONFIG_MJPEG_DECODER 0 ++#define CONFIG_MJPEGB_DECODER 0 ++#define 
CONFIG_MMVIDEO_DECODER 0 ++#define CONFIG_MOBICLIP_DECODER 0 ++#define CONFIG_MOTIONPIXELS_DECODER 0 ++#define CONFIG_MPEG1VIDEO_DECODER 0 ++#define CONFIG_MPEG2VIDEO_DECODER 0 ++#define CONFIG_MPEG4_DECODER 0 ++#define CONFIG_MPEG4_CRYSTALHD_DECODER 0 ++#define CONFIG_MPEG4_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG4_MMAL_DECODER 0 ++#define CONFIG_MPEGVIDEO_DECODER 0 ++#define CONFIG_MPEG1_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG2_MMAL_DECODER 0 ++#define CONFIG_MPEG2_CRYSTALHD_DECODER 0 ++#define CONFIG_MPEG2_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG2_QSV_DECODER 0 ++#define CONFIG_MPEG2_MEDIACODEC_DECODER 0 ++#define CONFIG_MSA1_DECODER 0 ++#define CONFIG_MSCC_DECODER 0 ++#define CONFIG_MSMPEG4V1_DECODER 0 ++#define CONFIG_MSMPEG4V2_DECODER 0 ++#define CONFIG_MSMPEG4V3_DECODER 0 ++#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0 ++#define CONFIG_MSP2_DECODER 0 ++#define CONFIG_MSRLE_DECODER 0 ++#define CONFIG_MSS1_DECODER 0 ++#define CONFIG_MSS2_DECODER 0 ++#define CONFIG_MSVIDEO1_DECODER 0 ++#define CONFIG_MSZH_DECODER 0 ++#define CONFIG_MTS2_DECODER 0 ++#define CONFIG_MV30_DECODER 0 ++#define CONFIG_MVC1_DECODER 0 ++#define CONFIG_MVC2_DECODER 0 ++#define CONFIG_MVDV_DECODER 0 ++#define CONFIG_MVHA_DECODER 0 ++#define CONFIG_MWSC_DECODER 0 ++#define CONFIG_MXPEG_DECODER 0 ++#define CONFIG_NOTCHLC_DECODER 0 ++#define CONFIG_NUV_DECODER 0 ++#define CONFIG_PAF_VIDEO_DECODER 0 ++#define CONFIG_PAM_DECODER 0 ++#define CONFIG_PBM_DECODER 0 ++#define CONFIG_PCX_DECODER 0 ++#define CONFIG_PDV_DECODER 0 ++#define CONFIG_PFM_DECODER 0 ++#define CONFIG_PGM_DECODER 0 ++#define CONFIG_PGMYUV_DECODER 0 ++#define CONFIG_PGX_DECODER 0 ++#define CONFIG_PHM_DECODER 0 ++#define CONFIG_PHOTOCD_DECODER 0 ++#define CONFIG_PICTOR_DECODER 0 ++#define CONFIG_PIXLET_DECODER 0 ++#define CONFIG_PNG_DECODER 0 ++#define CONFIG_PPM_DECODER 0 ++#define CONFIG_PRORES_DECODER 0 ++#define CONFIG_PROSUMER_DECODER 0 ++#define CONFIG_PSD_DECODER 0 ++#define CONFIG_PTX_DECODER 0 ++#define CONFIG_QDRAW_DECODER 
0 ++#define CONFIG_QOI_DECODER 0 ++#define CONFIG_QPEG_DECODER 0 ++#define CONFIG_QTRLE_DECODER 0 ++#define CONFIG_R10K_DECODER 0 ++#define CONFIG_R210_DECODER 0 ++#define CONFIG_RASC_DECODER 0 ++#define CONFIG_RAWVIDEO_DECODER 0 ++#define CONFIG_RKA_DECODER 0 ++#define CONFIG_RL2_DECODER 0 ++#define CONFIG_ROQ_DECODER 0 ++#define CONFIG_RPZA_DECODER 0 ++#define CONFIG_RSCC_DECODER 0 ++#define CONFIG_RTV1_DECODER 0 ++#define CONFIG_RV10_DECODER 0 ++#define CONFIG_RV20_DECODER 0 ++#define CONFIG_RV30_DECODER 0 ++#define CONFIG_RV40_DECODER 0 ++#define CONFIG_S302M_DECODER 0 ++#define CONFIG_SANM_DECODER 0 ++#define CONFIG_SCPR_DECODER 0 ++#define CONFIG_SCREENPRESSO_DECODER 0 ++#define CONFIG_SGA_DECODER 0 ++#define CONFIG_SGI_DECODER 0 ++#define CONFIG_SGIRLE_DECODER 0 ++#define CONFIG_SHEERVIDEO_DECODER 0 ++#define CONFIG_SIMBIOSIS_IMX_DECODER 0 ++#define CONFIG_SMACKER_DECODER 0 ++#define CONFIG_SMC_DECODER 0 ++#define CONFIG_SMVJPEG_DECODER 0 ++#define CONFIG_SNOW_DECODER 0 ++#define CONFIG_SP5X_DECODER 0 ++#define CONFIG_SPEEDHQ_DECODER 0 ++#define CONFIG_SPEEX_DECODER 0 ++#define CONFIG_SRGC_DECODER 0 ++#define CONFIG_SUNRAST_DECODER 0 ++#define CONFIG_SVQ1_DECODER 0 ++#define CONFIG_SVQ3_DECODER 0 ++#define CONFIG_TARGA_DECODER 0 ++#define CONFIG_TARGA_Y216_DECODER 0 ++#define CONFIG_TDSC_DECODER 0 ++#define CONFIG_THEORA_DECODER 1 ++#define CONFIG_THP_DECODER 0 ++#define CONFIG_TIERTEXSEQVIDEO_DECODER 0 ++#define CONFIG_TIFF_DECODER 0 ++#define CONFIG_TMV_DECODER 0 ++#define CONFIG_TRUEMOTION1_DECODER 0 ++#define CONFIG_TRUEMOTION2_DECODER 0 ++#define CONFIG_TRUEMOTION2RT_DECODER 0 ++#define CONFIG_TSCC_DECODER 0 ++#define CONFIG_TSCC2_DECODER 0 ++#define CONFIG_TXD_DECODER 0 ++#define CONFIG_ULTI_DECODER 0 ++#define CONFIG_UTVIDEO_DECODER 0 ++#define CONFIG_V210_DECODER 0 ++#define CONFIG_V210X_DECODER 0 ++#define CONFIG_V308_DECODER 0 ++#define CONFIG_V408_DECODER 0 ++#define CONFIG_V410_DECODER 0 ++#define CONFIG_VB_DECODER 0 ++#define CONFIG_VBN_DECODER 
0 ++#define CONFIG_VBLE_DECODER 0 ++#define CONFIG_VC1_DECODER 0 ++#define CONFIG_VC1_CRYSTALHD_DECODER 0 ++#define CONFIG_VC1IMAGE_DECODER 0 ++#define CONFIG_VC1_MMAL_DECODER 0 ++#define CONFIG_VC1_QSV_DECODER 0 ++#define CONFIG_VC1_V4L2M2M_DECODER 0 ++#define CONFIG_VCR1_DECODER 0 ++#define CONFIG_VMDVIDEO_DECODER 0 ++#define CONFIG_VMIX_DECODER 0 ++#define CONFIG_VMNC_DECODER 0 ++#define CONFIG_VP3_DECODER 1 ++#define CONFIG_VP4_DECODER 0 ++#define CONFIG_VP5_DECODER 0 ++#define CONFIG_VP6_DECODER 0 ++#define CONFIG_VP6A_DECODER 0 ++#define CONFIG_VP6F_DECODER 0 ++#define CONFIG_VP7_DECODER 0 ++#define CONFIG_VP8_DECODER 1 ++#define CONFIG_VP8_RKMPP_DECODER 0 ++#define CONFIG_VP8_V4L2M2M_DECODER 0 ++#define CONFIG_VP9_DECODER 0 ++#define CONFIG_VP9_RKMPP_DECODER 0 ++#define CONFIG_VP9_V4L2M2M_DECODER 0 ++#define CONFIG_VQA_DECODER 0 ++#define CONFIG_VQC_DECODER 0 ++#define CONFIG_WBMP_DECODER 0 ++#define CONFIG_WEBP_DECODER 0 ++#define CONFIG_WCMV_DECODER 0 ++#define CONFIG_WRAPPED_AVFRAME_DECODER 0 ++#define CONFIG_WMV1_DECODER 0 ++#define CONFIG_WMV2_DECODER 0 ++#define CONFIG_WMV3_DECODER 0 ++#define CONFIG_WMV3_CRYSTALHD_DECODER 0 ++#define CONFIG_WMV3IMAGE_DECODER 0 ++#define CONFIG_WNV1_DECODER 0 ++#define CONFIG_XAN_WC3_DECODER 0 ++#define CONFIG_XAN_WC4_DECODER 0 ++#define CONFIG_XBM_DECODER 0 ++#define CONFIG_XFACE_DECODER 0 ++#define CONFIG_XL_DECODER 0 ++#define CONFIG_XPM_DECODER 0 ++#define CONFIG_XWD_DECODER 0 ++#define CONFIG_Y41P_DECODER 0 ++#define CONFIG_YLC_DECODER 0 ++#define CONFIG_YOP_DECODER 0 ++#define CONFIG_YUV4_DECODER 0 ++#define CONFIG_ZERO12V_DECODER 0 ++#define CONFIG_ZEROCODEC_DECODER 0 ++#define CONFIG_ZLIB_DECODER 0 ++#define CONFIG_ZMBV_DECODER 0 ++#define CONFIG_AAC_DECODER 1 ++#define CONFIG_AAC_FIXED_DECODER 0 ++#define CONFIG_AAC_LATM_DECODER 0 ++#define CONFIG_AC3_DECODER 0 ++#define CONFIG_AC3_FIXED_DECODER 0 ++#define CONFIG_ACELP_KELVIN_DECODER 0 ++#define CONFIG_ALAC_DECODER 0 ++#define CONFIG_ALS_DECODER 0 ++#define 
CONFIG_AMRNB_DECODER 0 ++#define CONFIG_AMRWB_DECODER 0 ++#define CONFIG_APAC_DECODER 0 ++#define CONFIG_APE_DECODER 0 ++#define CONFIG_APTX_DECODER 0 ++#define CONFIG_APTX_HD_DECODER 0 ++#define CONFIG_ATRAC1_DECODER 0 ++#define CONFIG_ATRAC3_DECODER 0 ++#define CONFIG_ATRAC3AL_DECODER 0 ++#define CONFIG_ATRAC3P_DECODER 0 ++#define CONFIG_ATRAC3PAL_DECODER 0 ++#define CONFIG_ATRAC9_DECODER 0 ++#define CONFIG_BINKAUDIO_DCT_DECODER 0 ++#define CONFIG_BINKAUDIO_RDFT_DECODER 0 ++#define CONFIG_BMV_AUDIO_DECODER 0 ++#define CONFIG_BONK_DECODER 0 ++#define CONFIG_COOK_DECODER 0 ++#define CONFIG_DCA_DECODER 0 ++#define CONFIG_DFPWM_DECODER 0 ++#define CONFIG_DOLBY_E_DECODER 0 ++#define CONFIG_DSD_LSBF_DECODER 0 ++#define CONFIG_DSD_MSBF_DECODER 0 ++#define CONFIG_DSD_LSBF_PLANAR_DECODER 0 ++#define CONFIG_DSD_MSBF_PLANAR_DECODER 0 ++#define CONFIG_DSICINAUDIO_DECODER 0 ++#define CONFIG_DSS_SP_DECODER 0 ++#define CONFIG_DST_DECODER 0 ++#define CONFIG_EAC3_DECODER 0 ++#define CONFIG_EVRC_DECODER 0 ++#define CONFIG_FASTAUDIO_DECODER 0 ++#define CONFIG_FFWAVESYNTH_DECODER 0 ++#define CONFIG_FLAC_DECODER 1 ++#define CONFIG_FTR_DECODER 0 ++#define CONFIG_G723_1_DECODER 0 ++#define CONFIG_G729_DECODER 0 ++#define CONFIG_GSM_DECODER 0 ++#define CONFIG_GSM_MS_DECODER 0 ++#define CONFIG_HCA_DECODER 0 ++#define CONFIG_HCOM_DECODER 0 ++#define CONFIG_HDR_DECODER 0 ++#define CONFIG_IAC_DECODER 0 ++#define CONFIG_ILBC_DECODER 0 ++#define CONFIG_IMC_DECODER 0 ++#define CONFIG_INTERPLAY_ACM_DECODER 0 ++#define CONFIG_MACE3_DECODER 0 ++#define CONFIG_MACE6_DECODER 0 ++#define CONFIG_METASOUND_DECODER 0 ++#define CONFIG_MISC4_DECODER 0 ++#define CONFIG_MLP_DECODER 0 ++#define CONFIG_MP1_DECODER 0 ++#define CONFIG_MP1FLOAT_DECODER 0 ++#define CONFIG_MP2_DECODER 0 ++#define CONFIG_MP2FLOAT_DECODER 0 ++#define CONFIG_MP3FLOAT_DECODER 0 ++#define CONFIG_MP3_DECODER 1 ++#define CONFIG_MP3ADUFLOAT_DECODER 0 ++#define CONFIG_MP3ADU_DECODER 0 ++#define CONFIG_MP3ON4FLOAT_DECODER 0 ++#define 
CONFIG_MP3ON4_DECODER 0 ++#define CONFIG_MPC7_DECODER 0 ++#define CONFIG_MPC8_DECODER 0 ++#define CONFIG_MSNSIREN_DECODER 0 ++#define CONFIG_NELLYMOSER_DECODER 0 ++#define CONFIG_ON2AVC_DECODER 0 ++#define CONFIG_OPUS_DECODER 0 ++#define CONFIG_OSQ_DECODER 0 ++#define CONFIG_PAF_AUDIO_DECODER 0 ++#define CONFIG_QCELP_DECODER 0 ++#define CONFIG_QDM2_DECODER 0 ++#define CONFIG_QDMC_DECODER 0 ++#define CONFIG_RA_144_DECODER 0 ++#define CONFIG_RA_288_DECODER 0 ++#define CONFIG_RALF_DECODER 0 ++#define CONFIG_SBC_DECODER 0 ++#define CONFIG_SHORTEN_DECODER 0 ++#define CONFIG_SIPR_DECODER 0 ++#define CONFIG_SIREN_DECODER 0 ++#define CONFIG_SMACKAUD_DECODER 0 ++#define CONFIG_SONIC_DECODER 0 ++#define CONFIG_TAK_DECODER 0 ++#define CONFIG_TRUEHD_DECODER 0 ++#define CONFIG_TRUESPEECH_DECODER 0 ++#define CONFIG_TTA_DECODER 0 ++#define CONFIG_TWINVQ_DECODER 0 ++#define CONFIG_VMDAUDIO_DECODER 0 ++#define CONFIG_VORBIS_DECODER 1 ++#define CONFIG_WAVARC_DECODER 0 ++#define CONFIG_WAVPACK_DECODER 0 ++#define CONFIG_WMALOSSLESS_DECODER 0 ++#define CONFIG_WMAPRO_DECODER 0 ++#define CONFIG_WMAV1_DECODER 0 ++#define CONFIG_WMAV2_DECODER 0 ++#define CONFIG_WMAVOICE_DECODER 0 ++#define CONFIG_WS_SND1_DECODER 0 ++#define CONFIG_XMA1_DECODER 0 ++#define CONFIG_XMA2_DECODER 0 ++#define CONFIG_PCM_ALAW_DECODER 1 ++#define CONFIG_PCM_BLURAY_DECODER 0 ++#define CONFIG_PCM_DVD_DECODER 0 ++#define CONFIG_PCM_F16LE_DECODER 0 ++#define CONFIG_PCM_F24LE_DECODER 0 ++#define CONFIG_PCM_F32BE_DECODER 0 ++#define CONFIG_PCM_F32LE_DECODER 1 ++#define CONFIG_PCM_F64BE_DECODER 0 ++#define CONFIG_PCM_F64LE_DECODER 0 ++#define CONFIG_PCM_LXF_DECODER 0 ++#define CONFIG_PCM_MULAW_DECODER 1 ++#define CONFIG_PCM_S8_DECODER 0 ++#define CONFIG_PCM_S8_PLANAR_DECODER 0 ++#define CONFIG_PCM_S16BE_DECODER 1 ++#define CONFIG_PCM_S16BE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S16LE_DECODER 1 ++#define CONFIG_PCM_S16LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S24BE_DECODER 1 ++#define CONFIG_PCM_S24DAUD_DECODER 0 ++#define 
CONFIG_PCM_S24LE_DECODER 1 ++#define CONFIG_PCM_S24LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S32BE_DECODER 0 ++#define CONFIG_PCM_S32LE_DECODER 1 ++#define CONFIG_PCM_S32LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S64BE_DECODER 0 ++#define CONFIG_PCM_S64LE_DECODER 0 ++#define CONFIG_PCM_SGA_DECODER 0 ++#define CONFIG_PCM_U8_DECODER 1 ++#define CONFIG_PCM_U16BE_DECODER 0 ++#define CONFIG_PCM_U16LE_DECODER 0 ++#define CONFIG_PCM_U24BE_DECODER 0 ++#define CONFIG_PCM_U24LE_DECODER 0 ++#define CONFIG_PCM_U32BE_DECODER 0 ++#define CONFIG_PCM_U32LE_DECODER 0 ++#define CONFIG_PCM_VIDC_DECODER 0 ++#define CONFIG_CBD2_DPCM_DECODER 0 ++#define CONFIG_DERF_DPCM_DECODER 0 ++#define CONFIG_GREMLIN_DPCM_DECODER 0 ++#define CONFIG_INTERPLAY_DPCM_DECODER 0 ++#define CONFIG_ROQ_DPCM_DECODER 0 ++#define CONFIG_SDX2_DPCM_DECODER 0 ++#define CONFIG_SOL_DPCM_DECODER 0 ++#define CONFIG_XAN_DPCM_DECODER 0 ++#define CONFIG_WADY_DPCM_DECODER 0 ++#define CONFIG_ADPCM_4XM_DECODER 0 ++#define CONFIG_ADPCM_ADX_DECODER 0 ++#define CONFIG_ADPCM_AFC_DECODER 0 ++#define CONFIG_ADPCM_AGM_DECODER 0 ++#define CONFIG_ADPCM_AICA_DECODER 0 ++#define CONFIG_ADPCM_ARGO_DECODER 0 ++#define CONFIG_ADPCM_CT_DECODER 0 ++#define CONFIG_ADPCM_DTK_DECODER 0 ++#define CONFIG_ADPCM_EA_DECODER 0 ++#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0 ++#define CONFIG_ADPCM_EA_R1_DECODER 0 ++#define CONFIG_ADPCM_EA_R2_DECODER 0 ++#define CONFIG_ADPCM_EA_R3_DECODER 0 ++#define CONFIG_ADPCM_EA_XAS_DECODER 0 ++#define CONFIG_ADPCM_G722_DECODER 0 ++#define CONFIG_ADPCM_G726_DECODER 0 ++#define CONFIG_ADPCM_G726LE_DECODER 0 ++#define CONFIG_ADPCM_IMA_ACORN_DECODER 0 ++#define CONFIG_ADPCM_IMA_AMV_DECODER 0 ++#define CONFIG_ADPCM_IMA_ALP_DECODER 0 ++#define CONFIG_ADPCM_IMA_APC_DECODER 0 ++#define CONFIG_ADPCM_IMA_APM_DECODER 0 ++#define CONFIG_ADPCM_IMA_CUNNING_DECODER 0 ++#define CONFIG_ADPCM_IMA_DAT4_DECODER 0 ++#define CONFIG_ADPCM_IMA_DK3_DECODER 0 ++#define CONFIG_ADPCM_IMA_DK4_DECODER 0 ++#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0 
++#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0 ++#define CONFIG_ADPCM_IMA_ISS_DECODER 0 ++#define CONFIG_ADPCM_IMA_MOFLEX_DECODER 0 ++#define CONFIG_ADPCM_IMA_MTF_DECODER 0 ++#define CONFIG_ADPCM_IMA_OKI_DECODER 0 ++#define CONFIG_ADPCM_IMA_QT_DECODER 0 ++#define CONFIG_ADPCM_IMA_RAD_DECODER 0 ++#define CONFIG_ADPCM_IMA_SSI_DECODER 0 ++#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0 ++#define CONFIG_ADPCM_IMA_WAV_DECODER 0 ++#define CONFIG_ADPCM_IMA_WS_DECODER 0 ++#define CONFIG_ADPCM_MS_DECODER 0 ++#define CONFIG_ADPCM_MTAF_DECODER 0 ++#define CONFIG_ADPCM_PSX_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_2_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_3_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_4_DECODER 0 ++#define CONFIG_ADPCM_SWF_DECODER 0 ++#define CONFIG_ADPCM_THP_DECODER 0 ++#define CONFIG_ADPCM_THP_LE_DECODER 0 ++#define CONFIG_ADPCM_VIMA_DECODER 0 ++#define CONFIG_ADPCM_XA_DECODER 0 ++#define CONFIG_ADPCM_XMD_DECODER 0 ++#define CONFIG_ADPCM_YAMAHA_DECODER 0 ++#define CONFIG_ADPCM_ZORK_DECODER 0 ++#define CONFIG_SSA_DECODER 0 ++#define CONFIG_ASS_DECODER 0 ++#define CONFIG_CCAPTION_DECODER 0 ++#define CONFIG_DVBSUB_DECODER 0 ++#define CONFIG_DVDSUB_DECODER 0 ++#define CONFIG_JACOSUB_DECODER 0 ++#define CONFIG_MICRODVD_DECODER 0 ++#define CONFIG_MOVTEXT_DECODER 0 ++#define CONFIG_MPL2_DECODER 0 ++#define CONFIG_PGSSUB_DECODER 0 ++#define CONFIG_PJS_DECODER 0 ++#define CONFIG_REALTEXT_DECODER 0 ++#define CONFIG_SAMI_DECODER 0 ++#define CONFIG_SRT_DECODER 0 ++#define CONFIG_STL_DECODER 0 ++#define CONFIG_SUBRIP_DECODER 0 ++#define CONFIG_SUBVIEWER_DECODER 0 ++#define CONFIG_SUBVIEWER1_DECODER 0 ++#define CONFIG_TEXT_DECODER 0 ++#define CONFIG_VPLAYER_DECODER 0 ++#define CONFIG_WEBVTT_DECODER 0 ++#define CONFIG_XSUB_DECODER 0 ++#define CONFIG_AAC_AT_DECODER 0 ++#define CONFIG_AC3_AT_DECODER 0 ++#define CONFIG_ADPCM_IMA_QT_AT_DECODER 0 ++#define CONFIG_ALAC_AT_DECODER 0 ++#define CONFIG_AMR_NB_AT_DECODER 0 ++#define CONFIG_EAC3_AT_DECODER 0 ++#define CONFIG_GSM_MS_AT_DECODER 0 
++#define CONFIG_ILBC_AT_DECODER 0 ++#define CONFIG_MP1_AT_DECODER 0 ++#define CONFIG_MP2_AT_DECODER 0 ++#define CONFIG_MP3_AT_DECODER 0 ++#define CONFIG_PCM_ALAW_AT_DECODER 0 ++#define CONFIG_PCM_MULAW_AT_DECODER 0 ++#define CONFIG_QDMC_AT_DECODER 0 ++#define CONFIG_QDM2_AT_DECODER 0 ++#define CONFIG_LIBARIBCAPTION_DECODER 0 ++#define CONFIG_LIBARIBB24_DECODER 0 ++#define CONFIG_LIBCELT_DECODER 0 ++#define CONFIG_LIBCODEC2_DECODER 0 ++#define CONFIG_LIBDAV1D_DECODER 0 ++#define CONFIG_LIBDAVS2_DECODER 0 ++#define CONFIG_LIBFDK_AAC_DECODER 0 ++#define CONFIG_LIBGSM_DECODER 0 ++#define CONFIG_LIBGSM_MS_DECODER 0 ++#define CONFIG_LIBILBC_DECODER 0 ++#define CONFIG_LIBJXL_DECODER 0 ++#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0 ++#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0 ++#define CONFIG_LIBOPUS_DECODER 1 ++#define CONFIG_LIBRSVG_DECODER 0 ++#define CONFIG_LIBSPEEX_DECODER 0 ++#define CONFIG_LIBUAVS3D_DECODER 0 ++#define CONFIG_LIBVORBIS_DECODER 0 ++#define CONFIG_LIBVPX_VP8_DECODER 0 ++#define CONFIG_LIBVPX_VP9_DECODER 0 ++#define CONFIG_LIBZVBI_TELETEXT_DECODER 0 ++#define CONFIG_BINTEXT_DECODER 0 ++#define CONFIG_XBIN_DECODER 0 ++#define CONFIG_IDF_DECODER 0 ++#define CONFIG_LIBAOM_AV1_DECODER 0 ++#define CONFIG_AV1_DECODER 0 ++#define CONFIG_AV1_CUVID_DECODER 0 ++#define CONFIG_AV1_MEDIACODEC_DECODER 0 ++#define CONFIG_AV1_QSV_DECODER 0 ++#define CONFIG_LIBOPENH264_DECODER 0 ++#define CONFIG_H264_CUVID_DECODER 0 ++#define CONFIG_HEVC_CUVID_DECODER 0 ++#define CONFIG_HEVC_MEDIACODEC_DECODER 0 ++#define CONFIG_MJPEG_CUVID_DECODER 0 ++#define CONFIG_MJPEG_QSV_DECODER 0 ++#define CONFIG_MPEG1_CUVID_DECODER 0 ++#define CONFIG_MPEG2_CUVID_DECODER 0 ++#define CONFIG_MPEG4_CUVID_DECODER 0 ++#define CONFIG_MPEG4_MEDIACODEC_DECODER 0 ++#define CONFIG_VC1_CUVID_DECODER 0 ++#define CONFIG_VP8_CUVID_DECODER 0 ++#define CONFIG_VP8_MEDIACODEC_DECODER 0 ++#define CONFIG_VP8_QSV_DECODER 0 ++#define CONFIG_VP9_CUVID_DECODER 0 ++#define CONFIG_VP9_MEDIACODEC_DECODER 0 ++#define 
CONFIG_VP9_QSV_DECODER 0 ++#define CONFIG_VNULL_DECODER 0 ++#define CONFIG_ANULL_DECODER 0 ++#define CONFIG_A64MULTI_ENCODER 0 ++#define CONFIG_A64MULTI5_ENCODER 0 ++#define CONFIG_ALIAS_PIX_ENCODER 0 ++#define CONFIG_AMV_ENCODER 0 ++#define CONFIG_APNG_ENCODER 0 ++#define CONFIG_ASV1_ENCODER 0 ++#define CONFIG_ASV2_ENCODER 0 ++#define CONFIG_AVRP_ENCODER 0 ++#define CONFIG_AVUI_ENCODER 0 ++#define CONFIG_AYUV_ENCODER 0 ++#define CONFIG_BITPACKED_ENCODER 0 ++#define CONFIG_BMP_ENCODER 0 ++#define CONFIG_CFHD_ENCODER 0 ++#define CONFIG_CINEPAK_ENCODER 0 ++#define CONFIG_CLJR_ENCODER 0 ++#define CONFIG_COMFORTNOISE_ENCODER 0 ++#define CONFIG_DNXHD_ENCODER 0 ++#define CONFIG_DPX_ENCODER 0 ++#define CONFIG_DVVIDEO_ENCODER 0 ++#define CONFIG_EXR_ENCODER 0 ++#define CONFIG_FFV1_ENCODER 0 ++#define CONFIG_FFVHUFF_ENCODER 0 ++#define CONFIG_FITS_ENCODER 0 ++#define CONFIG_FLASHSV_ENCODER 0 ++#define CONFIG_FLASHSV2_ENCODER 0 ++#define CONFIG_FLV_ENCODER 0 ++#define CONFIG_GIF_ENCODER 0 ++#define CONFIG_H261_ENCODER 0 ++#define CONFIG_H263_ENCODER 0 ++#define CONFIG_H263P_ENCODER 0 ++#define CONFIG_H264_MEDIACODEC_ENCODER 0 ++#define CONFIG_HAP_ENCODER 0 ++#define CONFIG_HUFFYUV_ENCODER 0 ++#define CONFIG_JPEG2000_ENCODER 0 ++#define CONFIG_JPEGLS_ENCODER 0 ++#define CONFIG_LJPEG_ENCODER 0 ++#define CONFIG_MAGICYUV_ENCODER 0 ++#define CONFIG_MJPEG_ENCODER 0 ++#define CONFIG_MPEG1VIDEO_ENCODER 0 ++#define CONFIG_MPEG2VIDEO_ENCODER 0 ++#define CONFIG_MPEG4_ENCODER 0 ++#define CONFIG_MSMPEG4V2_ENCODER 0 ++#define CONFIG_MSMPEG4V3_ENCODER 0 ++#define CONFIG_MSRLE_ENCODER 0 ++#define CONFIG_MSVIDEO1_ENCODER 0 ++#define CONFIG_PAM_ENCODER 0 ++#define CONFIG_PBM_ENCODER 0 ++#define CONFIG_PCX_ENCODER 0 ++#define CONFIG_PFM_ENCODER 0 ++#define CONFIG_PGM_ENCODER 0 ++#define CONFIG_PGMYUV_ENCODER 0 ++#define CONFIG_PHM_ENCODER 0 ++#define CONFIG_PNG_ENCODER 0 ++#define CONFIG_PPM_ENCODER 0 ++#define CONFIG_PRORES_ENCODER 0 ++#define CONFIG_PRORES_AW_ENCODER 0 ++#define 
CONFIG_PRORES_KS_ENCODER 0 ++#define CONFIG_QOI_ENCODER 0 ++#define CONFIG_QTRLE_ENCODER 0 ++#define CONFIG_R10K_ENCODER 0 ++#define CONFIG_R210_ENCODER 0 ++#define CONFIG_RAWVIDEO_ENCODER 0 ++#define CONFIG_ROQ_ENCODER 0 ++#define CONFIG_RPZA_ENCODER 0 ++#define CONFIG_RV10_ENCODER 0 ++#define CONFIG_RV20_ENCODER 0 ++#define CONFIG_S302M_ENCODER 0 ++#define CONFIG_SGI_ENCODER 0 ++#define CONFIG_SMC_ENCODER 0 ++#define CONFIG_SNOW_ENCODER 0 ++#define CONFIG_SPEEDHQ_ENCODER 0 ++#define CONFIG_SUNRAST_ENCODER 0 ++#define CONFIG_SVQ1_ENCODER 0 ++#define CONFIG_TARGA_ENCODER 0 ++#define CONFIG_TIFF_ENCODER 0 ++#define CONFIG_UTVIDEO_ENCODER 0 ++#define CONFIG_V210_ENCODER 0 ++#define CONFIG_V308_ENCODER 0 ++#define CONFIG_V408_ENCODER 0 ++#define CONFIG_V410_ENCODER 0 ++#define CONFIG_VBN_ENCODER 0 ++#define CONFIG_VC2_ENCODER 0 ++#define CONFIG_WBMP_ENCODER 0 ++#define CONFIG_WRAPPED_AVFRAME_ENCODER 0 ++#define CONFIG_WMV1_ENCODER 0 ++#define CONFIG_WMV2_ENCODER 0 ++#define CONFIG_XBM_ENCODER 0 ++#define CONFIG_XFACE_ENCODER 0 ++#define CONFIG_XWD_ENCODER 0 ++#define CONFIG_Y41P_ENCODER 0 ++#define CONFIG_YUV4_ENCODER 0 ++#define CONFIG_ZLIB_ENCODER 0 ++#define CONFIG_ZMBV_ENCODER 0 ++#define CONFIG_AAC_ENCODER 0 ++#define CONFIG_AC3_ENCODER 0 ++#define CONFIG_AC3_FIXED_ENCODER 0 ++#define CONFIG_ALAC_ENCODER 0 ++#define CONFIG_APTX_ENCODER 0 ++#define CONFIG_APTX_HD_ENCODER 0 ++#define CONFIG_DCA_ENCODER 0 ++#define CONFIG_DFPWM_ENCODER 0 ++#define CONFIG_EAC3_ENCODER 0 ++#define CONFIG_FLAC_ENCODER 0 ++#define CONFIG_G723_1_ENCODER 0 ++#define CONFIG_HDR_ENCODER 0 ++#define CONFIG_MLP_ENCODER 0 ++#define CONFIG_MP2_ENCODER 0 ++#define CONFIG_MP2FIXED_ENCODER 0 ++#define CONFIG_NELLYMOSER_ENCODER 0 ++#define CONFIG_OPUS_ENCODER 0 ++#define CONFIG_RA_144_ENCODER 0 ++#define CONFIG_SBC_ENCODER 0 ++#define CONFIG_SONIC_ENCODER 0 ++#define CONFIG_SONIC_LS_ENCODER 0 ++#define CONFIG_TRUEHD_ENCODER 0 ++#define CONFIG_TTA_ENCODER 0 ++#define CONFIG_VORBIS_ENCODER 0 
++#define CONFIG_WAVPACK_ENCODER 0 ++#define CONFIG_WMAV1_ENCODER 0 ++#define CONFIG_WMAV2_ENCODER 0 ++#define CONFIG_PCM_ALAW_ENCODER 0 ++#define CONFIG_PCM_BLURAY_ENCODER 0 ++#define CONFIG_PCM_DVD_ENCODER 0 ++#define CONFIG_PCM_F32BE_ENCODER 0 ++#define CONFIG_PCM_F32LE_ENCODER 0 ++#define CONFIG_PCM_F64BE_ENCODER 0 ++#define CONFIG_PCM_F64LE_ENCODER 0 ++#define CONFIG_PCM_MULAW_ENCODER 0 ++#define CONFIG_PCM_S8_ENCODER 0 ++#define CONFIG_PCM_S8_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S16BE_ENCODER 0 ++#define CONFIG_PCM_S16BE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S16LE_ENCODER 0 ++#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S24BE_ENCODER 0 ++#define CONFIG_PCM_S24DAUD_ENCODER 0 ++#define CONFIG_PCM_S24LE_ENCODER 0 ++#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S32BE_ENCODER 0 ++#define CONFIG_PCM_S32LE_ENCODER 0 ++#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S64BE_ENCODER 0 ++#define CONFIG_PCM_S64LE_ENCODER 0 ++#define CONFIG_PCM_U8_ENCODER 0 ++#define CONFIG_PCM_U16BE_ENCODER 0 ++#define CONFIG_PCM_U16LE_ENCODER 0 ++#define CONFIG_PCM_U24BE_ENCODER 0 ++#define CONFIG_PCM_U24LE_ENCODER 0 ++#define CONFIG_PCM_U32BE_ENCODER 0 ++#define CONFIG_PCM_U32LE_ENCODER 0 ++#define CONFIG_PCM_VIDC_ENCODER 0 ++#define CONFIG_ROQ_DPCM_ENCODER 0 ++#define CONFIG_ADPCM_ADX_ENCODER 0 ++#define CONFIG_ADPCM_ARGO_ENCODER 0 ++#define CONFIG_ADPCM_G722_ENCODER 0 ++#define CONFIG_ADPCM_G726_ENCODER 0 ++#define CONFIG_ADPCM_G726LE_ENCODER 0 ++#define CONFIG_ADPCM_IMA_AMV_ENCODER 0 ++#define CONFIG_ADPCM_IMA_ALP_ENCODER 0 ++#define CONFIG_ADPCM_IMA_APM_ENCODER 0 ++#define CONFIG_ADPCM_IMA_QT_ENCODER 0 ++#define CONFIG_ADPCM_IMA_SSI_ENCODER 0 ++#define CONFIG_ADPCM_IMA_WAV_ENCODER 0 ++#define CONFIG_ADPCM_IMA_WS_ENCODER 0 ++#define CONFIG_ADPCM_MS_ENCODER 0 ++#define CONFIG_ADPCM_SWF_ENCODER 0 ++#define CONFIG_ADPCM_YAMAHA_ENCODER 0 ++#define CONFIG_SSA_ENCODER 0 ++#define CONFIG_ASS_ENCODER 0 ++#define CONFIG_DVBSUB_ENCODER 0 
++#define CONFIG_DVDSUB_ENCODER 0 ++#define CONFIG_MOVTEXT_ENCODER 0 ++#define CONFIG_SRT_ENCODER 0 ++#define CONFIG_SUBRIP_ENCODER 0 ++#define CONFIG_TEXT_ENCODER 0 ++#define CONFIG_TTML_ENCODER 0 ++#define CONFIG_WEBVTT_ENCODER 0 ++#define CONFIG_XSUB_ENCODER 0 ++#define CONFIG_AAC_AT_ENCODER 0 ++#define CONFIG_ALAC_AT_ENCODER 0 ++#define CONFIG_ILBC_AT_ENCODER 0 ++#define CONFIG_PCM_ALAW_AT_ENCODER 0 ++#define CONFIG_PCM_MULAW_AT_ENCODER 0 ++#define CONFIG_LIBAOM_AV1_ENCODER 0 ++#define CONFIG_LIBCODEC2_ENCODER 0 ++#define CONFIG_LIBFDK_AAC_ENCODER 0 ++#define CONFIG_LIBGSM_ENCODER 0 ++#define CONFIG_LIBGSM_MS_ENCODER 0 ++#define CONFIG_LIBILBC_ENCODER 0 ++#define CONFIG_LIBJXL_ENCODER 0 ++#define CONFIG_LIBMP3LAME_ENCODER 0 ++#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0 ++#define CONFIG_LIBOPENJPEG_ENCODER 0 ++#define CONFIG_LIBOPUS_ENCODER 0 ++#define CONFIG_LIBRAV1E_ENCODER 0 ++#define CONFIG_LIBSHINE_ENCODER 0 ++#define CONFIG_LIBSPEEX_ENCODER 0 ++#define CONFIG_LIBSVTAV1_ENCODER 0 ++#define CONFIG_LIBTHEORA_ENCODER 0 ++#define CONFIG_LIBTWOLAME_ENCODER 0 ++#define CONFIG_LIBVO_AMRWBENC_ENCODER 0 ++#define CONFIG_LIBVORBIS_ENCODER 0 ++#define CONFIG_LIBVPX_VP8_ENCODER 0 ++#define CONFIG_LIBVPX_VP9_ENCODER 0 ++#define CONFIG_LIBWEBP_ANIM_ENCODER 0 ++#define CONFIG_LIBWEBP_ENCODER 0 ++#define CONFIG_LIBX262_ENCODER 0 ++#define CONFIG_LIBX264_ENCODER 0 ++#define CONFIG_LIBX264RGB_ENCODER 0 ++#define CONFIG_LIBX265_ENCODER 0 ++#define CONFIG_LIBXAVS_ENCODER 0 ++#define CONFIG_LIBXAVS2_ENCODER 0 ++#define CONFIG_LIBXVID_ENCODER 0 ++#define CONFIG_AAC_MF_ENCODER 0 ++#define CONFIG_AC3_MF_ENCODER 0 ++#define CONFIG_H263_V4L2M2M_ENCODER 0 ++#define CONFIG_AV1_MEDIACODEC_ENCODER 0 ++#define CONFIG_AV1_NVENC_ENCODER 0 ++#define CONFIG_AV1_QSV_ENCODER 0 ++#define CONFIG_AV1_AMF_ENCODER 0 ++#define CONFIG_AV1_VAAPI_ENCODER 0 ++#define CONFIG_LIBOPENH264_ENCODER 0 ++#define CONFIG_H264_AMF_ENCODER 0 ++#define CONFIG_H264_MF_ENCODER 0 ++#define CONFIG_H264_NVENC_ENCODER 0 
++#define CONFIG_H264_OMX_ENCODER 0 ++#define CONFIG_H264_QSV_ENCODER 0 ++#define CONFIG_H264_V4L2M2M_ENCODER 0 ++#define CONFIG_H264_VAAPI_ENCODER 0 ++#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_HEVC_AMF_ENCODER 0 ++#define CONFIG_HEVC_MEDIACODEC_ENCODER 0 ++#define CONFIG_HEVC_MF_ENCODER 0 ++#define CONFIG_HEVC_NVENC_ENCODER 0 ++#define CONFIG_HEVC_QSV_ENCODER 0 ++#define CONFIG_HEVC_V4L2M2M_ENCODER 0 ++#define CONFIG_HEVC_VAAPI_ENCODER 0 ++#define CONFIG_HEVC_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_LIBKVAZAAR_ENCODER 0 ++#define CONFIG_MJPEG_QSV_ENCODER 0 ++#define CONFIG_MJPEG_VAAPI_ENCODER 0 ++#define CONFIG_MP3_MF_ENCODER 0 ++#define CONFIG_MPEG2_QSV_ENCODER 0 ++#define CONFIG_MPEG2_VAAPI_ENCODER 0 ++#define CONFIG_MPEG4_MEDIACODEC_ENCODER 0 ++#define CONFIG_MPEG4_OMX_ENCODER 0 ++#define CONFIG_MPEG4_V4L2M2M_ENCODER 0 ++#define CONFIG_PRORES_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_VP8_MEDIACODEC_ENCODER 0 ++#define CONFIG_VP8_V4L2M2M_ENCODER 0 ++#define CONFIG_VP8_VAAPI_ENCODER 0 ++#define CONFIG_VP9_MEDIACODEC_ENCODER 0 ++#define CONFIG_VP9_VAAPI_ENCODER 0 ++#define CONFIG_VP9_QSV_ENCODER 0 ++#define CONFIG_VNULL_ENCODER 0 ++#define CONFIG_ANULL_ENCODER 0 ++#define CONFIG_AV1_D3D11VA_HWACCEL 0 ++#define CONFIG_AV1_D3D11VA2_HWACCEL 0 ++#define CONFIG_AV1_DXVA2_HWACCEL 0 ++#define CONFIG_AV1_NVDEC_HWACCEL 0 ++#define CONFIG_AV1_VAAPI_HWACCEL 0 ++#define CONFIG_AV1_VDPAU_HWACCEL 0 ++#define CONFIG_AV1_VULKAN_HWACCEL 0 ++#define CONFIG_H263_VAAPI_HWACCEL 0 ++#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_H264_D3D11VA_HWACCEL 0 ++#define CONFIG_H264_D3D11VA2_HWACCEL 0 ++#define CONFIG_H264_DXVA2_HWACCEL 0 ++#define CONFIG_H264_NVDEC_HWACCEL 0 ++#define CONFIG_H264_VAAPI_HWACCEL 0 ++#define CONFIG_H264_VDPAU_HWACCEL 0 ++#define CONFIG_H264_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_H264_VULKAN_HWACCEL 0 ++#define CONFIG_HEVC_D3D11VA_HWACCEL 0 ++#define CONFIG_HEVC_D3D11VA2_HWACCEL 0 ++#define CONFIG_HEVC_DXVA2_HWACCEL 0 ++#define 
CONFIG_HEVC_NVDEC_HWACCEL 0 ++#define CONFIG_HEVC_VAAPI_HWACCEL 0 ++#define CONFIG_HEVC_VDPAU_HWACCEL 0 ++#define CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_HEVC_VULKAN_HWACCEL 0 ++#define CONFIG_MJPEG_NVDEC_HWACCEL 0 ++#define CONFIG_MJPEG_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG1_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG1_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MPEG2_D3D11VA_HWACCEL 0 ++#define CONFIG_MPEG2_D3D11VA2_HWACCEL 0 ++#define CONFIG_MPEG2_DXVA2_HWACCEL 0 ++#define CONFIG_MPEG2_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG2_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG2_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MPEG4_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG4_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG4_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_PRORES_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_VC1_D3D11VA_HWACCEL 0 ++#define CONFIG_VC1_D3D11VA2_HWACCEL 0 ++#define CONFIG_VC1_DXVA2_HWACCEL 0 ++#define CONFIG_VC1_NVDEC_HWACCEL 0 ++#define CONFIG_VC1_VAAPI_HWACCEL 0 ++#define CONFIG_VC1_VDPAU_HWACCEL 0 ++#define CONFIG_VP8_NVDEC_HWACCEL 0 ++#define CONFIG_VP8_VAAPI_HWACCEL 0 ++#define CONFIG_VP9_D3D11VA_HWACCEL 0 ++#define CONFIG_VP9_D3D11VA2_HWACCEL 0 ++#define CONFIG_VP9_DXVA2_HWACCEL 0 ++#define CONFIG_VP9_NVDEC_HWACCEL 0 ++#define CONFIG_VP9_VAAPI_HWACCEL 0 ++#define CONFIG_VP9_VDPAU_HWACCEL 0 ++#define CONFIG_VP9_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_WMV3_D3D11VA_HWACCEL 0 ++#define CONFIG_WMV3_D3D11VA2_HWACCEL 0 ++#define CONFIG_WMV3_DXVA2_HWACCEL 0 ++#define CONFIG_WMV3_NVDEC_HWACCEL 0 ++#define CONFIG_WMV3_VAAPI_HWACCEL 0 ++#define CONFIG_WMV3_VDPAU_HWACCEL 0 ++#define CONFIG_AAC_PARSER 1 ++#define CONFIG_AAC_LATM_PARSER 0 ++#define CONFIG_AC3_PARSER 0 ++#define CONFIG_ADX_PARSER 0 ++#define CONFIG_AMR_PARSER 0 ++#define CONFIG_AV1_PARSER 0 ++#define CONFIG_AVS2_PARSER 0 ++#define CONFIG_AVS3_PARSER 0 ++#define CONFIG_BMP_PARSER 0 ++#define 
CONFIG_CAVSVIDEO_PARSER 0 ++#define CONFIG_COOK_PARSER 0 ++#define CONFIG_CRI_PARSER 0 ++#define CONFIG_DCA_PARSER 0 ++#define CONFIG_DIRAC_PARSER 0 ++#define CONFIG_DNXHD_PARSER 0 ++#define CONFIG_DOLBY_E_PARSER 0 ++#define CONFIG_DPX_PARSER 0 ++#define CONFIG_DVAUDIO_PARSER 0 ++#define CONFIG_DVBSUB_PARSER 0 ++#define CONFIG_DVDSUB_PARSER 0 ++#define CONFIG_DVD_NAV_PARSER 0 ++#define CONFIG_EVC_PARSER 0 ++#define CONFIG_FLAC_PARSER 1 ++#define CONFIG_FTR_PARSER 0 ++#define CONFIG_G723_1_PARSER 0 ++#define CONFIG_G729_PARSER 0 ++#define CONFIG_GIF_PARSER 0 ++#define CONFIG_GSM_PARSER 0 ++#define CONFIG_H261_PARSER 0 ++#define CONFIG_H263_PARSER 0 ++#define CONFIG_H264_PARSER 1 ++#define CONFIG_HEVC_PARSER 0 ++#define CONFIG_HDR_PARSER 0 ++#define CONFIG_IPU_PARSER 0 ++#define CONFIG_JPEG2000_PARSER 0 ++#define CONFIG_JPEGXL_PARSER 0 ++#define CONFIG_MISC4_PARSER 0 ++#define CONFIG_MJPEG_PARSER 0 ++#define CONFIG_MLP_PARSER 0 ++#define CONFIG_MPEG4VIDEO_PARSER 0 ++#define CONFIG_MPEGAUDIO_PARSER 1 ++#define CONFIG_MPEGVIDEO_PARSER 0 ++#define CONFIG_OPUS_PARSER 1 ++#define CONFIG_PNG_PARSER 0 ++#define CONFIG_PNM_PARSER 0 ++#define CONFIG_QOI_PARSER 0 ++#define CONFIG_RV34_PARSER 0 ++#define CONFIG_SBC_PARSER 0 ++#define CONFIG_SIPR_PARSER 0 ++#define CONFIG_TAK_PARSER 0 ++#define CONFIG_VC1_PARSER 0 ++#define CONFIG_VORBIS_PARSER 1 ++#define CONFIG_VP3_PARSER 1 ++#define CONFIG_VP8_PARSER 1 ++#define CONFIG_VP9_PARSER 1 ++#define CONFIG_VVC_PARSER 0 ++#define CONFIG_WEBP_PARSER 0 ++#define CONFIG_XBM_PARSER 0 ++#define CONFIG_XMA_PARSER 0 ++#define CONFIG_XWD_PARSER 0 ++#define CONFIG_ALSA_INDEV 0 ++#define CONFIG_ANDROID_CAMERA_INDEV 0 ++#define CONFIG_AVFOUNDATION_INDEV 0 ++#define CONFIG_BKTR_INDEV 0 ++#define CONFIG_DECKLINK_INDEV 0 ++#define CONFIG_DSHOW_INDEV 0 ++#define CONFIG_FBDEV_INDEV 0 ++#define CONFIG_GDIGRAB_INDEV 0 ++#define CONFIG_IEC61883_INDEV 0 ++#define CONFIG_JACK_INDEV 0 ++#define CONFIG_KMSGRAB_INDEV 0 ++#define CONFIG_LAVFI_INDEV 0 
++#define CONFIG_OPENAL_INDEV 0 ++#define CONFIG_OSS_INDEV 0 ++#define CONFIG_PULSE_INDEV 0 ++#define CONFIG_SNDIO_INDEV 0 ++#define CONFIG_V4L2_INDEV 0 ++#define CONFIG_VFWCAP_INDEV 0 ++#define CONFIG_XCBGRAB_INDEV 0 ++#define CONFIG_LIBCDIO_INDEV 0 ++#define CONFIG_LIBDC1394_INDEV 0 ++#define CONFIG_ALSA_OUTDEV 0 ++#define CONFIG_AUDIOTOOLBOX_OUTDEV 0 ++#define CONFIG_CACA_OUTDEV 0 ++#define CONFIG_DECKLINK_OUTDEV 0 ++#define CONFIG_FBDEV_OUTDEV 0 ++#define CONFIG_OPENGL_OUTDEV 0 ++#define CONFIG_OSS_OUTDEV 0 ++#define CONFIG_PULSE_OUTDEV 0 ++#define CONFIG_SDL2_OUTDEV 0 ++#define CONFIG_SNDIO_OUTDEV 0 ++#define CONFIG_V4L2_OUTDEV 0 ++#define CONFIG_XV_OUTDEV 0 ++#define CONFIG_ABENCH_FILTER 0 ++#define CONFIG_ACOMPRESSOR_FILTER 0 ++#define CONFIG_ACONTRAST_FILTER 0 ++#define CONFIG_ACOPY_FILTER 0 ++#define CONFIG_ACUE_FILTER 0 ++#define CONFIG_ACROSSFADE_FILTER 0 ++#define CONFIG_ACROSSOVER_FILTER 0 ++#define CONFIG_ACRUSHER_FILTER 0 ++#define CONFIG_ADECLICK_FILTER 0 ++#define CONFIG_ADECLIP_FILTER 0 ++#define CONFIG_ADECORRELATE_FILTER 0 ++#define CONFIG_ADELAY_FILTER 0 ++#define CONFIG_ADENORM_FILTER 0 ++#define CONFIG_ADERIVATIVE_FILTER 0 ++#define CONFIG_ADRC_FILTER 0 ++#define CONFIG_ADYNAMICEQUALIZER_FILTER 0 ++#define CONFIG_ADYNAMICSMOOTH_FILTER 0 ++#define CONFIG_AECHO_FILTER 0 ++#define CONFIG_AEMPHASIS_FILTER 0 ++#define CONFIG_AEVAL_FILTER 0 ++#define CONFIG_AEXCITER_FILTER 0 ++#define CONFIG_AFADE_FILTER 0 ++#define CONFIG_AFFTDN_FILTER 0 ++#define CONFIG_AFFTFILT_FILTER 0 ++#define CONFIG_AFIR_FILTER 0 ++#define CONFIG_AFORMAT_FILTER 0 ++#define CONFIG_AFREQSHIFT_FILTER 0 ++#define CONFIG_AFWTDN_FILTER 0 ++#define CONFIG_AGATE_FILTER 0 ++#define CONFIG_AIIR_FILTER 0 ++#define CONFIG_AINTEGRAL_FILTER 0 ++#define CONFIG_AINTERLEAVE_FILTER 0 ++#define CONFIG_ALATENCY_FILTER 0 ++#define CONFIG_ALIMITER_FILTER 0 ++#define CONFIG_ALLPASS_FILTER 0 ++#define CONFIG_ALOOP_FILTER 0 ++#define CONFIG_AMERGE_FILTER 0 ++#define CONFIG_AMETADATA_FILTER 0 
++#define CONFIG_AMIX_FILTER 0 ++#define CONFIG_AMULTIPLY_FILTER 0 ++#define CONFIG_ANEQUALIZER_FILTER 0 ++#define CONFIG_ANLMDN_FILTER 0 ++#define CONFIG_ANLMF_FILTER 0 ++#define CONFIG_ANLMS_FILTER 0 ++#define CONFIG_ANULL_FILTER 0 ++#define CONFIG_APAD_FILTER 0 ++#define CONFIG_APERMS_FILTER 0 ++#define CONFIG_APHASER_FILTER 0 ++#define CONFIG_APHASESHIFT_FILTER 0 ++#define CONFIG_APSNR_FILTER 0 ++#define CONFIG_APSYCLIP_FILTER 0 ++#define CONFIG_APULSATOR_FILTER 0 ++#define CONFIG_AREALTIME_FILTER 0 ++#define CONFIG_ARESAMPLE_FILTER 0 ++#define CONFIG_AREVERSE_FILTER 0 ++#define CONFIG_ARLS_FILTER 0 ++#define CONFIG_ARNNDN_FILTER 0 ++#define CONFIG_ASDR_FILTER 0 ++#define CONFIG_ASEGMENT_FILTER 0 ++#define CONFIG_ASELECT_FILTER 0 ++#define CONFIG_ASENDCMD_FILTER 0 ++#define CONFIG_ASETNSAMPLES_FILTER 0 ++#define CONFIG_ASETPTS_FILTER 0 ++#define CONFIG_ASETRATE_FILTER 0 ++#define CONFIG_ASETTB_FILTER 0 ++#define CONFIG_ASHOWINFO_FILTER 0 ++#define CONFIG_ASIDEDATA_FILTER 0 ++#define CONFIG_ASISDR_FILTER 0 ++#define CONFIG_ASOFTCLIP_FILTER 0 ++#define CONFIG_ASPECTRALSTATS_FILTER 0 ++#define CONFIG_ASPLIT_FILTER 0 ++#define CONFIG_ASR_FILTER 0 ++#define CONFIG_ASTATS_FILTER 0 ++#define CONFIG_ASTREAMSELECT_FILTER 0 ++#define CONFIG_ASUBBOOST_FILTER 0 ++#define CONFIG_ASUBCUT_FILTER 0 ++#define CONFIG_ASUPERCUT_FILTER 0 ++#define CONFIG_ASUPERPASS_FILTER 0 ++#define CONFIG_ASUPERSTOP_FILTER 0 ++#define CONFIG_ATEMPO_FILTER 0 ++#define CONFIG_ATILT_FILTER 0 ++#define CONFIG_ATRIM_FILTER 0 ++#define CONFIG_AXCORRELATE_FILTER 0 ++#define CONFIG_AZMQ_FILTER 0 ++#define CONFIG_BANDPASS_FILTER 0 ++#define CONFIG_BANDREJECT_FILTER 0 ++#define CONFIG_BASS_FILTER 0 ++#define CONFIG_BIQUAD_FILTER 0 ++#define CONFIG_BS2B_FILTER 0 ++#define CONFIG_CHANNELMAP_FILTER 0 ++#define CONFIG_CHANNELSPLIT_FILTER 0 ++#define CONFIG_CHORUS_FILTER 0 ++#define CONFIG_COMPAND_FILTER 0 ++#define CONFIG_COMPENSATIONDELAY_FILTER 0 ++#define CONFIG_CROSSFEED_FILTER 0 ++#define 
CONFIG_CRYSTALIZER_FILTER 0 ++#define CONFIG_DCSHIFT_FILTER 0 ++#define CONFIG_DEESSER_FILTER 0 ++#define CONFIG_DIALOGUENHANCE_FILTER 0 ++#define CONFIG_DRMETER_FILTER 0 ++#define CONFIG_DYNAUDNORM_FILTER 0 ++#define CONFIG_EARWAX_FILTER 0 ++#define CONFIG_EBUR128_FILTER 0 ++#define CONFIG_EQUALIZER_FILTER 0 ++#define CONFIG_EXTRASTEREO_FILTER 0 ++#define CONFIG_FIREQUALIZER_FILTER 0 ++#define CONFIG_FLANGER_FILTER 0 ++#define CONFIG_HAAS_FILTER 0 ++#define CONFIG_HDCD_FILTER 0 ++#define CONFIG_HEADPHONE_FILTER 0 ++#define CONFIG_HIGHPASS_FILTER 0 ++#define CONFIG_HIGHSHELF_FILTER 0 ++#define CONFIG_JOIN_FILTER 0 ++#define CONFIG_LADSPA_FILTER 0 ++#define CONFIG_LOUDNORM_FILTER 0 ++#define CONFIG_LOWPASS_FILTER 0 ++#define CONFIG_LOWSHELF_FILTER 0 ++#define CONFIG_LV2_FILTER 0 ++#define CONFIG_MCOMPAND_FILTER 0 ++#define CONFIG_PAN_FILTER 0 ++#define CONFIG_REPLAYGAIN_FILTER 0 ++#define CONFIG_RUBBERBAND_FILTER 0 ++#define CONFIG_SIDECHAINCOMPRESS_FILTER 0 ++#define CONFIG_SIDECHAINGATE_FILTER 0 ++#define CONFIG_SILENCEDETECT_FILTER 0 ++#define CONFIG_SILENCEREMOVE_FILTER 0 ++#define CONFIG_SOFALIZER_FILTER 0 ++#define CONFIG_SPEECHNORM_FILTER 0 ++#define CONFIG_STEREOTOOLS_FILTER 0 ++#define CONFIG_STEREOWIDEN_FILTER 0 ++#define CONFIG_SUPEREQUALIZER_FILTER 0 ++#define CONFIG_SURROUND_FILTER 0 ++#define CONFIG_TILTSHELF_FILTER 0 ++#define CONFIG_TREBLE_FILTER 0 ++#define CONFIG_TREMOLO_FILTER 0 ++#define CONFIG_VIBRATO_FILTER 0 ++#define CONFIG_VIRTUALBASS_FILTER 0 ++#define CONFIG_VOLUME_FILTER 0 ++#define CONFIG_VOLUMEDETECT_FILTER 0 ++#define CONFIG_AEVALSRC_FILTER 0 ++#define CONFIG_AFDELAYSRC_FILTER 0 ++#define CONFIG_AFIREQSRC_FILTER 0 ++#define CONFIG_AFIRSRC_FILTER 0 ++#define CONFIG_ANOISESRC_FILTER 0 ++#define CONFIG_ANULLSRC_FILTER 0 ++#define CONFIG_FLITE_FILTER 0 ++#define CONFIG_HILBERT_FILTER 0 ++#define CONFIG_SINC_FILTER 0 ++#define CONFIG_SINE_FILTER 0 ++#define CONFIG_ANULLSINK_FILTER 0 ++#define CONFIG_ADDROI_FILTER 0 ++#define 
CONFIG_ALPHAEXTRACT_FILTER 0 ++#define CONFIG_ALPHAMERGE_FILTER 0 ++#define CONFIG_AMPLIFY_FILTER 0 ++#define CONFIG_ASS_FILTER 0 ++#define CONFIG_ATADENOISE_FILTER 0 ++#define CONFIG_AVGBLUR_FILTER 0 ++#define CONFIG_AVGBLUR_OPENCL_FILTER 0 ++#define CONFIG_AVGBLUR_VULKAN_FILTER 0 ++#define CONFIG_BACKGROUNDKEY_FILTER 0 ++#define CONFIG_BBOX_FILTER 0 ++#define CONFIG_BENCH_FILTER 0 ++#define CONFIG_BILATERAL_FILTER 0 ++#define CONFIG_BILATERAL_CUDA_FILTER 0 ++#define CONFIG_BITPLANENOISE_FILTER 0 ++#define CONFIG_BLACKDETECT_FILTER 0 ++#define CONFIG_BLACKFRAME_FILTER 0 ++#define CONFIG_BLEND_FILTER 0 ++#define CONFIG_BLEND_VULKAN_FILTER 0 ++#define CONFIG_BLOCKDETECT_FILTER 0 ++#define CONFIG_BLURDETECT_FILTER 0 ++#define CONFIG_BM3D_FILTER 0 ++#define CONFIG_BOXBLUR_FILTER 0 ++#define CONFIG_BOXBLUR_OPENCL_FILTER 0 ++#define CONFIG_BWDIF_FILTER 0 ++#define CONFIG_BWDIF_CUDA_FILTER 0 ++#define CONFIG_BWDIF_VULKAN_FILTER 0 ++#define CONFIG_CAS_FILTER 0 ++#define CONFIG_CCREPACK_FILTER 0 ++#define CONFIG_CHROMABER_VULKAN_FILTER 0 ++#define CONFIG_CHROMAHOLD_FILTER 0 ++#define CONFIG_CHROMAKEY_FILTER 0 ++#define CONFIG_CHROMAKEY_CUDA_FILTER 0 ++#define CONFIG_CHROMANR_FILTER 0 ++#define CONFIG_CHROMASHIFT_FILTER 0 ++#define CONFIG_CIESCOPE_FILTER 0 ++#define CONFIG_CODECVIEW_FILTER 0 ++#define CONFIG_COLORBALANCE_FILTER 0 ++#define CONFIG_COLORCHANNELMIXER_FILTER 0 ++#define CONFIG_COLORCONTRAST_FILTER 0 ++#define CONFIG_COLORCORRECT_FILTER 0 ++#define CONFIG_COLORIZE_FILTER 0 ++#define CONFIG_COLORKEY_FILTER 0 ++#define CONFIG_COLORKEY_OPENCL_FILTER 0 ++#define CONFIG_COLORHOLD_FILTER 0 ++#define CONFIG_COLORLEVELS_FILTER 0 ++#define CONFIG_COLORMAP_FILTER 0 ++#define CONFIG_COLORMATRIX_FILTER 0 ++#define CONFIG_COLORSPACE_FILTER 0 ++#define CONFIG_COLORSPACE_CUDA_FILTER 0 ++#define CONFIG_COLORTEMPERATURE_FILTER 0 ++#define CONFIG_CONVOLUTION_FILTER 0 ++#define CONFIG_CONVOLUTION_OPENCL_FILTER 0 ++#define CONFIG_CONVOLVE_FILTER 0 ++#define CONFIG_COPY_FILTER 0 
++#define CONFIG_COREIMAGE_FILTER 0 ++#define CONFIG_CORR_FILTER 0 ++#define CONFIG_COVER_RECT_FILTER 0 ++#define CONFIG_CROP_FILTER 0 ++#define CONFIG_CROPDETECT_FILTER 0 ++#define CONFIG_CUE_FILTER 0 ++#define CONFIG_CURVES_FILTER 0 ++#define CONFIG_DATASCOPE_FILTER 0 ++#define CONFIG_DBLUR_FILTER 0 ++#define CONFIG_DCTDNOIZ_FILTER 0 ++#define CONFIG_DEBAND_FILTER 0 ++#define CONFIG_DEBLOCK_FILTER 0 ++#define CONFIG_DECIMATE_FILTER 0 ++#define CONFIG_DECONVOLVE_FILTER 0 ++#define CONFIG_DEDOT_FILTER 0 ++#define CONFIG_DEFLATE_FILTER 0 ++#define CONFIG_DEFLICKER_FILTER 0 ++#define CONFIG_DEINTERLACE_QSV_FILTER 0 ++#define CONFIG_DEINTERLACE_VAAPI_FILTER 0 ++#define CONFIG_DEJUDDER_FILTER 0 ++#define CONFIG_DELOGO_FILTER 0 ++#define CONFIG_DENOISE_VAAPI_FILTER 0 ++#define CONFIG_DERAIN_FILTER 0 ++#define CONFIG_DESHAKE_FILTER 0 ++#define CONFIG_DESHAKE_OPENCL_FILTER 0 ++#define CONFIG_DESPILL_FILTER 0 ++#define CONFIG_DETELECINE_FILTER 0 ++#define CONFIG_DILATION_FILTER 0 ++#define CONFIG_DILATION_OPENCL_FILTER 0 ++#define CONFIG_DISPLACE_FILTER 0 ++#define CONFIG_DNN_CLASSIFY_FILTER 0 ++#define CONFIG_DNN_DETECT_FILTER 0 ++#define CONFIG_DNN_PROCESSING_FILTER 0 ++#define CONFIG_DOUBLEWEAVE_FILTER 0 ++#define CONFIG_DRAWBOX_FILTER 0 ++#define CONFIG_DRAWGRAPH_FILTER 0 ++#define CONFIG_DRAWGRID_FILTER 0 ++#define CONFIG_DRAWTEXT_FILTER 0 ++#define CONFIG_EDGEDETECT_FILTER 0 ++#define CONFIG_ELBG_FILTER 0 ++#define CONFIG_ENTROPY_FILTER 0 ++#define CONFIG_EPX_FILTER 0 ++#define CONFIG_EQ_FILTER 0 ++#define CONFIG_EROSION_FILTER 0 ++#define CONFIG_EROSION_OPENCL_FILTER 0 ++#define CONFIG_ESTDIF_FILTER 0 ++#define CONFIG_EXPOSURE_FILTER 0 ++#define CONFIG_EXTRACTPLANES_FILTER 0 ++#define CONFIG_FADE_FILTER 0 ++#define CONFIG_FEEDBACK_FILTER 0 ++#define CONFIG_FFTDNOIZ_FILTER 0 ++#define CONFIG_FFTFILT_FILTER 0 ++#define CONFIG_FIELD_FILTER 0 ++#define CONFIG_FIELDHINT_FILTER 0 ++#define CONFIG_FIELDMATCH_FILTER 0 ++#define CONFIG_FIELDORDER_FILTER 0 ++#define 
CONFIG_FILLBORDERS_FILTER 0 ++#define CONFIG_FIND_RECT_FILTER 0 ++#define CONFIG_FLIP_VULKAN_FILTER 0 ++#define CONFIG_FLOODFILL_FILTER 0 ++#define CONFIG_FORMAT_FILTER 0 ++#define CONFIG_FPS_FILTER 0 ++#define CONFIG_FRAMEPACK_FILTER 0 ++#define CONFIG_FRAMERATE_FILTER 0 ++#define CONFIG_FRAMESTEP_FILTER 0 ++#define CONFIG_FREEZEDETECT_FILTER 0 ++#define CONFIG_FREEZEFRAMES_FILTER 0 ++#define CONFIG_FREI0R_FILTER 0 ++#define CONFIG_FSPP_FILTER 0 ++#define CONFIG_GBLUR_FILTER 0 ++#define CONFIG_GBLUR_VULKAN_FILTER 0 ++#define CONFIG_GEQ_FILTER 0 ++#define CONFIG_GRADFUN_FILTER 0 ++#define CONFIG_GRAPHMONITOR_FILTER 0 ++#define CONFIG_GRAYWORLD_FILTER 0 ++#define CONFIG_GREYEDGE_FILTER 0 ++#define CONFIG_GUIDED_FILTER 0 ++#define CONFIG_HALDCLUT_FILTER 0 ++#define CONFIG_HFLIP_FILTER 0 ++#define CONFIG_HFLIP_VULKAN_FILTER 0 ++#define CONFIG_HISTEQ_FILTER 0 ++#define CONFIG_HISTOGRAM_FILTER 0 ++#define CONFIG_HQDN3D_FILTER 0 ++#define CONFIG_HQX_FILTER 0 ++#define CONFIG_HSTACK_FILTER 0 ++#define CONFIG_HSVHOLD_FILTER 0 ++#define CONFIG_HSVKEY_FILTER 0 ++#define CONFIG_HUE_FILTER 0 ++#define CONFIG_HUESATURATION_FILTER 0 ++#define CONFIG_HWDOWNLOAD_FILTER 0 ++#define CONFIG_HWMAP_FILTER 0 ++#define CONFIG_HWUPLOAD_FILTER 0 ++#define CONFIG_HWUPLOAD_CUDA_FILTER 0 ++#define CONFIG_HYSTERESIS_FILTER 0 ++#define CONFIG_ICCDETECT_FILTER 0 ++#define CONFIG_ICCGEN_FILTER 0 ++#define CONFIG_IDENTITY_FILTER 0 ++#define CONFIG_IDET_FILTER 0 ++#define CONFIG_IL_FILTER 0 ++#define CONFIG_INFLATE_FILTER 0 ++#define CONFIG_INTERLACE_FILTER 0 ++#define CONFIG_INTERLEAVE_FILTER 0 ++#define CONFIG_KERNDEINT_FILTER 0 ++#define CONFIG_KIRSCH_FILTER 0 ++#define CONFIG_LAGFUN_FILTER 0 ++#define CONFIG_LATENCY_FILTER 0 ++#define CONFIG_LENSCORRECTION_FILTER 0 ++#define CONFIG_LENSFUN_FILTER 0 ++#define CONFIG_LIBPLACEBO_FILTER 0 ++#define CONFIG_LIBVMAF_FILTER 0 ++#define CONFIG_LIBVMAF_CUDA_FILTER 0 ++#define CONFIG_LIMITDIFF_FILTER 0 ++#define CONFIG_LIMITER_FILTER 0 ++#define 
CONFIG_LOOP_FILTER 0 ++#define CONFIG_LUMAKEY_FILTER 0 ++#define CONFIG_LUT_FILTER 0 ++#define CONFIG_LUT1D_FILTER 0 ++#define CONFIG_LUT2_FILTER 0 ++#define CONFIG_LUT3D_FILTER 0 ++#define CONFIG_LUTRGB_FILTER 0 ++#define CONFIG_LUTYUV_FILTER 0 ++#define CONFIG_MASKEDCLAMP_FILTER 0 ++#define CONFIG_MASKEDMAX_FILTER 0 ++#define CONFIG_MASKEDMERGE_FILTER 0 ++#define CONFIG_MASKEDMIN_FILTER 0 ++#define CONFIG_MASKEDTHRESHOLD_FILTER 0 ++#define CONFIG_MASKFUN_FILTER 0 ++#define CONFIG_MCDEINT_FILTER 0 ++#define CONFIG_MEDIAN_FILTER 0 ++#define CONFIG_MERGEPLANES_FILTER 0 ++#define CONFIG_MESTIMATE_FILTER 0 ++#define CONFIG_METADATA_FILTER 0 ++#define CONFIG_MIDEQUALIZER_FILTER 0 ++#define CONFIG_MINTERPOLATE_FILTER 0 ++#define CONFIG_MIX_FILTER 0 ++#define CONFIG_MONOCHROME_FILTER 0 ++#define CONFIG_MORPHO_FILTER 0 ++#define CONFIG_MPDECIMATE_FILTER 0 ++#define CONFIG_MSAD_FILTER 0 ++#define CONFIG_MULTIPLY_FILTER 0 ++#define CONFIG_NEGATE_FILTER 0 ++#define CONFIG_NLMEANS_FILTER 0 ++#define CONFIG_NLMEANS_OPENCL_FILTER 0 ++#define CONFIG_NLMEANS_VULKAN_FILTER 0 ++#define CONFIG_NNEDI_FILTER 0 ++#define CONFIG_NOFORMAT_FILTER 0 ++#define CONFIG_NOISE_FILTER 0 ++#define CONFIG_NORMALIZE_FILTER 0 ++#define CONFIG_NULL_FILTER 0 ++#define CONFIG_OCR_FILTER 0 ++#define CONFIG_OCV_FILTER 0 ++#define CONFIG_OSCILLOSCOPE_FILTER 0 ++#define CONFIG_OVERLAY_FILTER 0 ++#define CONFIG_OVERLAY_OPENCL_FILTER 0 ++#define CONFIG_OVERLAY_QSV_FILTER 0 ++#define CONFIG_OVERLAY_VAAPI_FILTER 0 ++#define CONFIG_OVERLAY_VULKAN_FILTER 0 ++#define CONFIG_OVERLAY_CUDA_FILTER 0 ++#define CONFIG_OWDENOISE_FILTER 0 ++#define CONFIG_PAD_FILTER 0 ++#define CONFIG_PAD_OPENCL_FILTER 0 ++#define CONFIG_PALETTEGEN_FILTER 0 ++#define CONFIG_PALETTEUSE_FILTER 0 ++#define CONFIG_PERMS_FILTER 0 ++#define CONFIG_PERSPECTIVE_FILTER 0 ++#define CONFIG_PHASE_FILTER 0 ++#define CONFIG_PHOTOSENSITIVITY_FILTER 0 ++#define CONFIG_PIXDESCTEST_FILTER 0 ++#define CONFIG_PIXELIZE_FILTER 0 ++#define 
CONFIG_PIXSCOPE_FILTER 0 ++#define CONFIG_PP_FILTER 0 ++#define CONFIG_PP7_FILTER 0 ++#define CONFIG_PREMULTIPLY_FILTER 0 ++#define CONFIG_PREWITT_FILTER 0 ++#define CONFIG_PREWITT_OPENCL_FILTER 0 ++#define CONFIG_PROCAMP_VAAPI_FILTER 0 ++#define CONFIG_PROGRAM_OPENCL_FILTER 0 ++#define CONFIG_PSEUDOCOLOR_FILTER 0 ++#define CONFIG_PSNR_FILTER 0 ++#define CONFIG_PULLUP_FILTER 0 ++#define CONFIG_QP_FILTER 0 ++#define CONFIG_RANDOM_FILTER 0 ++#define CONFIG_READEIA608_FILTER 0 ++#define CONFIG_READVITC_FILTER 0 ++#define CONFIG_REALTIME_FILTER 0 ++#define CONFIG_REMAP_FILTER 0 ++#define CONFIG_REMAP_OPENCL_FILTER 0 ++#define CONFIG_REMOVEGRAIN_FILTER 0 ++#define CONFIG_REMOVELOGO_FILTER 0 ++#define CONFIG_REPEATFIELDS_FILTER 0 ++#define CONFIG_REVERSE_FILTER 0 ++#define CONFIG_RGBASHIFT_FILTER 0 ++#define CONFIG_ROBERTS_FILTER 0 ++#define CONFIG_ROBERTS_OPENCL_FILTER 0 ++#define CONFIG_ROTATE_FILTER 0 ++#define CONFIG_SAB_FILTER 0 ++#define CONFIG_SCALE_FILTER 0 ++#define CONFIG_SCALE_CUDA_FILTER 0 ++#define CONFIG_SCALE_NPP_FILTER 0 ++#define CONFIG_SCALE_QSV_FILTER 0 ++#define CONFIG_SCALE_VAAPI_FILTER 0 ++#define CONFIG_SCALE_VT_FILTER 0 ++#define CONFIG_SCALE_VULKAN_FILTER 0 ++#define CONFIG_SCALE2REF_FILTER 0 ++#define CONFIG_SCALE2REF_NPP_FILTER 0 ++#define CONFIG_SCDET_FILTER 0 ++#define CONFIG_SCHARR_FILTER 0 ++#define CONFIG_SCROLL_FILTER 0 ++#define CONFIG_SEGMENT_FILTER 0 ++#define CONFIG_SELECT_FILTER 0 ++#define CONFIG_SELECTIVECOLOR_FILTER 0 ++#define CONFIG_SENDCMD_FILTER 0 ++#define CONFIG_SEPARATEFIELDS_FILTER 0 ++#define CONFIG_SETDAR_FILTER 0 ++#define CONFIG_SETFIELD_FILTER 0 ++#define CONFIG_SETPARAMS_FILTER 0 ++#define CONFIG_SETPTS_FILTER 0 ++#define CONFIG_SETRANGE_FILTER 0 ++#define CONFIG_SETSAR_FILTER 0 ++#define CONFIG_SETTB_FILTER 0 ++#define CONFIG_SHARPEN_NPP_FILTER 0 ++#define CONFIG_SHARPNESS_VAAPI_FILTER 0 ++#define CONFIG_SHEAR_FILTER 0 ++#define CONFIG_SHOWINFO_FILTER 0 ++#define CONFIG_SHOWPALETTE_FILTER 0 ++#define 
CONFIG_SHUFFLEFRAMES_FILTER 0 ++#define CONFIG_SHUFFLEPIXELS_FILTER 0 ++#define CONFIG_SHUFFLEPLANES_FILTER 0 ++#define CONFIG_SIDEDATA_FILTER 0 ++#define CONFIG_SIGNALSTATS_FILTER 0 ++#define CONFIG_SIGNATURE_FILTER 0 ++#define CONFIG_SITI_FILTER 0 ++#define CONFIG_SMARTBLUR_FILTER 0 ++#define CONFIG_SOBEL_FILTER 0 ++#define CONFIG_SOBEL_OPENCL_FILTER 0 ++#define CONFIG_SPLIT_FILTER 0 ++#define CONFIG_SPP_FILTER 0 ++#define CONFIG_SR_FILTER 0 ++#define CONFIG_SSIM_FILTER 0 ++#define CONFIG_SSIM360_FILTER 0 ++#define CONFIG_STEREO3D_FILTER 0 ++#define CONFIG_STREAMSELECT_FILTER 0 ++#define CONFIG_SUBTITLES_FILTER 0 ++#define CONFIG_SUPER2XSAI_FILTER 0 ++#define CONFIG_SWAPRECT_FILTER 0 ++#define CONFIG_SWAPUV_FILTER 0 ++#define CONFIG_TBLEND_FILTER 0 ++#define CONFIG_TELECINE_FILTER 0 ++#define CONFIG_THISTOGRAM_FILTER 0 ++#define CONFIG_THRESHOLD_FILTER 0 ++#define CONFIG_THUMBNAIL_FILTER 0 ++#define CONFIG_THUMBNAIL_CUDA_FILTER 0 ++#define CONFIG_TILE_FILTER 0 ++#define CONFIG_TINTERLACE_FILTER 0 ++#define CONFIG_TLUT2_FILTER 0 ++#define CONFIG_TMEDIAN_FILTER 0 ++#define CONFIG_TMIDEQUALIZER_FILTER 0 ++#define CONFIG_TMIX_FILTER 0 ++#define CONFIG_TONEMAP_FILTER 0 ++#define CONFIG_TONEMAP_OPENCL_FILTER 0 ++#define CONFIG_TONEMAP_VAAPI_FILTER 0 ++#define CONFIG_TPAD_FILTER 0 ++#define CONFIG_TRANSPOSE_FILTER 0 ++#define CONFIG_TRANSPOSE_NPP_FILTER 0 ++#define CONFIG_TRANSPOSE_OPENCL_FILTER 0 ++#define CONFIG_TRANSPOSE_VAAPI_FILTER 0 ++#define CONFIG_TRANSPOSE_VT_FILTER 0 ++#define CONFIG_TRANSPOSE_VULKAN_FILTER 0 ++#define CONFIG_TRIM_FILTER 0 ++#define CONFIG_UNPREMULTIPLY_FILTER 0 ++#define CONFIG_UNSHARP_FILTER 0 ++#define CONFIG_UNSHARP_OPENCL_FILTER 0 ++#define CONFIG_UNTILE_FILTER 0 ++#define CONFIG_USPP_FILTER 0 ++#define CONFIG_V360_FILTER 0 ++#define CONFIG_VAGUEDENOISER_FILTER 0 ++#define CONFIG_VARBLUR_FILTER 0 ++#define CONFIG_VECTORSCOPE_FILTER 0 ++#define CONFIG_VFLIP_FILTER 0 ++#define CONFIG_VFLIP_VULKAN_FILTER 0 ++#define CONFIG_VFRDET_FILTER 0 
++#define CONFIG_VIBRANCE_FILTER 0 ++#define CONFIG_VIDSTABDETECT_FILTER 0 ++#define CONFIG_VIDSTABTRANSFORM_FILTER 0 ++#define CONFIG_VIF_FILTER 0 ++#define CONFIG_VIGNETTE_FILTER 0 ++#define CONFIG_VMAFMOTION_FILTER 0 ++#define CONFIG_VPP_QSV_FILTER 0 ++#define CONFIG_VSTACK_FILTER 0 ++#define CONFIG_W3FDIF_FILTER 0 ++#define CONFIG_WAVEFORM_FILTER 0 ++#define CONFIG_WEAVE_FILTER 0 ++#define CONFIG_XBR_FILTER 0 ++#define CONFIG_XCORRELATE_FILTER 0 ++#define CONFIG_XFADE_FILTER 0 ++#define CONFIG_XFADE_OPENCL_FILTER 0 ++#define CONFIG_XFADE_VULKAN_FILTER 0 ++#define CONFIG_XMEDIAN_FILTER 0 ++#define CONFIG_XSTACK_FILTER 0 ++#define CONFIG_YADIF_FILTER 0 ++#define CONFIG_YADIF_CUDA_FILTER 0 ++#define CONFIG_YADIF_VIDEOTOOLBOX_FILTER 0 ++#define CONFIG_YAEPBLUR_FILTER 0 ++#define CONFIG_ZMQ_FILTER 0 ++#define CONFIG_ZOOMPAN_FILTER 0 ++#define CONFIG_ZSCALE_FILTER 0 ++#define CONFIG_HSTACK_VAAPI_FILTER 0 ++#define CONFIG_VSTACK_VAAPI_FILTER 0 ++#define CONFIG_XSTACK_VAAPI_FILTER 0 ++#define CONFIG_HSTACK_QSV_FILTER 0 ++#define CONFIG_VSTACK_QSV_FILTER 0 ++#define CONFIG_XSTACK_QSV_FILTER 0 ++#define CONFIG_ALLRGB_FILTER 0 ++#define CONFIG_ALLYUV_FILTER 0 ++#define CONFIG_CELLAUTO_FILTER 0 ++#define CONFIG_COLOR_FILTER 0 ++#define CONFIG_COLOR_VULKAN_FILTER 0 ++#define CONFIG_COLORCHART_FILTER 0 ++#define CONFIG_COLORSPECTRUM_FILTER 0 ++#define CONFIG_COREIMAGESRC_FILTER 0 ++#define CONFIG_DDAGRAB_FILTER 0 ++#define CONFIG_FREI0R_SRC_FILTER 0 ++#define CONFIG_GRADIENTS_FILTER 0 ++#define CONFIG_HALDCLUTSRC_FILTER 0 ++#define CONFIG_LIFE_FILTER 0 ++#define CONFIG_MANDELBROT_FILTER 0 ++#define CONFIG_MPTESTSRC_FILTER 0 ++#define CONFIG_NULLSRC_FILTER 0 ++#define CONFIG_OPENCLSRC_FILTER 0 ++#define CONFIG_PAL75BARS_FILTER 0 ++#define CONFIG_PAL100BARS_FILTER 0 ++#define CONFIG_RGBTESTSRC_FILTER 0 ++#define CONFIG_SIERPINSKI_FILTER 0 ++#define CONFIG_SMPTEBARS_FILTER 0 ++#define CONFIG_SMPTEHDBARS_FILTER 0 ++#define CONFIG_TESTSRC_FILTER 0 ++#define CONFIG_TESTSRC2_FILTER 
0 ++#define CONFIG_YUVTESTSRC_FILTER 0 ++#define CONFIG_ZONEPLATE_FILTER 0 ++#define CONFIG_NULLSINK_FILTER 0 ++#define CONFIG_A3DSCOPE_FILTER 0 ++#define CONFIG_ABITSCOPE_FILTER 0 ++#define CONFIG_ADRAWGRAPH_FILTER 0 ++#define CONFIG_AGRAPHMONITOR_FILTER 0 ++#define CONFIG_AHISTOGRAM_FILTER 0 ++#define CONFIG_APHASEMETER_FILTER 0 ++#define CONFIG_AVECTORSCOPE_FILTER 0 ++#define CONFIG_CONCAT_FILTER 0 ++#define CONFIG_SHOWCQT_FILTER 0 ++#define CONFIG_SHOWCWT_FILTER 0 ++#define CONFIG_SHOWFREQS_FILTER 0 ++#define CONFIG_SHOWSPATIAL_FILTER 0 ++#define CONFIG_SHOWSPECTRUM_FILTER 0 ++#define CONFIG_SHOWSPECTRUMPIC_FILTER 0 ++#define CONFIG_SHOWVOLUME_FILTER 0 ++#define CONFIG_SHOWWAVES_FILTER 0 ++#define CONFIG_SHOWWAVESPIC_FILTER 0 ++#define CONFIG_SPECTRUMSYNTH_FILTER 0 ++#define CONFIG_AVSYNCTEST_FILTER 0 ++#define CONFIG_AMOVIE_FILTER 0 ++#define CONFIG_MOVIE_FILTER 0 ++#define CONFIG_AFIFO_FILTER 0 ++#define CONFIG_FIFO_FILTER 0 ++#define CONFIG_AA_DEMUXER 0 ++#define CONFIG_AAC_DEMUXER 1 ++#define CONFIG_AAX_DEMUXER 0 ++#define CONFIG_AC3_DEMUXER 0 ++#define CONFIG_AC4_DEMUXER 0 ++#define CONFIG_ACE_DEMUXER 0 ++#define CONFIG_ACM_DEMUXER 0 ++#define CONFIG_ACT_DEMUXER 0 ++#define CONFIG_ADF_DEMUXER 0 ++#define CONFIG_ADP_DEMUXER 0 ++#define CONFIG_ADS_DEMUXER 0 ++#define CONFIG_ADX_DEMUXER 0 ++#define CONFIG_AEA_DEMUXER 0 ++#define CONFIG_AFC_DEMUXER 0 ++#define CONFIG_AIFF_DEMUXER 0 ++#define CONFIG_AIX_DEMUXER 0 ++#define CONFIG_ALP_DEMUXER 0 ++#define CONFIG_AMR_DEMUXER 0 ++#define CONFIG_AMRNB_DEMUXER 0 ++#define CONFIG_AMRWB_DEMUXER 0 ++#define CONFIG_ANM_DEMUXER 0 ++#define CONFIG_APAC_DEMUXER 0 ++#define CONFIG_APC_DEMUXER 0 ++#define CONFIG_APE_DEMUXER 0 ++#define CONFIG_APM_DEMUXER 0 ++#define CONFIG_APNG_DEMUXER 0 ++#define CONFIG_APTX_DEMUXER 0 ++#define CONFIG_APTX_HD_DEMUXER 0 ++#define CONFIG_AQTITLE_DEMUXER 0 ++#define CONFIG_ARGO_ASF_DEMUXER 0 ++#define CONFIG_ARGO_BRP_DEMUXER 0 ++#define CONFIG_ARGO_CVG_DEMUXER 0 ++#define CONFIG_ASF_DEMUXER 0 
++#define CONFIG_ASF_O_DEMUXER 0 ++#define CONFIG_ASS_DEMUXER 0 ++#define CONFIG_AST_DEMUXER 0 ++#define CONFIG_AU_DEMUXER 0 ++#define CONFIG_AV1_DEMUXER 0 ++#define CONFIG_AVI_DEMUXER 0 ++#define CONFIG_AVISYNTH_DEMUXER 0 ++#define CONFIG_AVR_DEMUXER 0 ++#define CONFIG_AVS_DEMUXER 0 ++#define CONFIG_AVS2_DEMUXER 0 ++#define CONFIG_AVS3_DEMUXER 0 ++#define CONFIG_BETHSOFTVID_DEMUXER 0 ++#define CONFIG_BFI_DEMUXER 0 ++#define CONFIG_BINTEXT_DEMUXER 0 ++#define CONFIG_BINK_DEMUXER 0 ++#define CONFIG_BINKA_DEMUXER 0 ++#define CONFIG_BIT_DEMUXER 0 ++#define CONFIG_BITPACKED_DEMUXER 0 ++#define CONFIG_BMV_DEMUXER 0 ++#define CONFIG_BFSTM_DEMUXER 0 ++#define CONFIG_BRSTM_DEMUXER 0 ++#define CONFIG_BOA_DEMUXER 0 ++#define CONFIG_BONK_DEMUXER 0 ++#define CONFIG_C93_DEMUXER 0 ++#define CONFIG_CAF_DEMUXER 0 ++#define CONFIG_CAVSVIDEO_DEMUXER 0 ++#define CONFIG_CDG_DEMUXER 0 ++#define CONFIG_CDXL_DEMUXER 0 ++#define CONFIG_CINE_DEMUXER 0 ++#define CONFIG_CODEC2_DEMUXER 0 ++#define CONFIG_CODEC2RAW_DEMUXER 0 ++#define CONFIG_CONCAT_DEMUXER 0 ++#define CONFIG_DASH_DEMUXER 0 ++#define CONFIG_DATA_DEMUXER 0 ++#define CONFIG_DAUD_DEMUXER 0 ++#define CONFIG_DCSTR_DEMUXER 0 ++#define CONFIG_DERF_DEMUXER 0 ++#define CONFIG_DFA_DEMUXER 0 ++#define CONFIG_DFPWM_DEMUXER 0 ++#define CONFIG_DHAV_DEMUXER 0 ++#define CONFIG_DIRAC_DEMUXER 0 ++#define CONFIG_DNXHD_DEMUXER 0 ++#define CONFIG_DSF_DEMUXER 0 ++#define CONFIG_DSICIN_DEMUXER 0 ++#define CONFIG_DSS_DEMUXER 0 ++#define CONFIG_DTS_DEMUXER 0 ++#define CONFIG_DTSHD_DEMUXER 0 ++#define CONFIG_DV_DEMUXER 0 ++#define CONFIG_DVBSUB_DEMUXER 0 ++#define CONFIG_DVBTXT_DEMUXER 0 ++#define CONFIG_DXA_DEMUXER 0 ++#define CONFIG_EA_DEMUXER 0 ++#define CONFIG_EA_CDATA_DEMUXER 0 ++#define CONFIG_EAC3_DEMUXER 0 ++#define CONFIG_EPAF_DEMUXER 0 ++#define CONFIG_EVC_DEMUXER 0 ++#define CONFIG_FFMETADATA_DEMUXER 0 ++#define CONFIG_FILMSTRIP_DEMUXER 0 ++#define CONFIG_FITS_DEMUXER 0 ++#define CONFIG_FLAC_DEMUXER 1 ++#define CONFIG_FLIC_DEMUXER 0 ++#define 
CONFIG_FLV_DEMUXER 0 ++#define CONFIG_LIVE_FLV_DEMUXER 0 ++#define CONFIG_FOURXM_DEMUXER 0 ++#define CONFIG_FRM_DEMUXER 0 ++#define CONFIG_FSB_DEMUXER 0 ++#define CONFIG_FWSE_DEMUXER 0 ++#define CONFIG_G722_DEMUXER 0 ++#define CONFIG_G723_1_DEMUXER 0 ++#define CONFIG_G726_DEMUXER 0 ++#define CONFIG_G726LE_DEMUXER 0 ++#define CONFIG_G729_DEMUXER 0 ++#define CONFIG_GDV_DEMUXER 0 ++#define CONFIG_GENH_DEMUXER 0 ++#define CONFIG_GIF_DEMUXER 0 ++#define CONFIG_GSM_DEMUXER 0 ++#define CONFIG_GXF_DEMUXER 0 ++#define CONFIG_H261_DEMUXER 0 ++#define CONFIG_H263_DEMUXER 0 ++#define CONFIG_H264_DEMUXER 0 ++#define CONFIG_HCA_DEMUXER 0 ++#define CONFIG_HCOM_DEMUXER 0 ++#define CONFIG_HEVC_DEMUXER 0 ++#define CONFIG_HLS_DEMUXER 0 ++#define CONFIG_HNM_DEMUXER 0 ++#define CONFIG_ICO_DEMUXER 0 ++#define CONFIG_IDCIN_DEMUXER 0 ++#define CONFIG_IDF_DEMUXER 0 ++#define CONFIG_IFF_DEMUXER 0 ++#define CONFIG_IFV_DEMUXER 0 ++#define CONFIG_ILBC_DEMUXER 0 ++#define CONFIG_IMAGE2_DEMUXER 0 ++#define CONFIG_IMAGE2PIPE_DEMUXER 0 ++#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 0 ++#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0 ++#define CONFIG_IMF_DEMUXER 0 ++#define CONFIG_INGENIENT_DEMUXER 0 ++#define CONFIG_IPMOVIE_DEMUXER 0 ++#define CONFIG_IPU_DEMUXER 0 ++#define CONFIG_IRCAM_DEMUXER 0 ++#define CONFIG_ISS_DEMUXER 0 ++#define CONFIG_IV8_DEMUXER 0 ++#define CONFIG_IVF_DEMUXER 0 ++#define CONFIG_IVR_DEMUXER 0 ++#define CONFIG_JACOSUB_DEMUXER 0 ++#define CONFIG_JV_DEMUXER 0 ++#define CONFIG_JPEGXL_ANIM_DEMUXER 0 ++#define CONFIG_KUX_DEMUXER 0 ++#define CONFIG_KVAG_DEMUXER 0 ++#define CONFIG_LAF_DEMUXER 0 ++#define CONFIG_LMLM4_DEMUXER 0 ++#define CONFIG_LOAS_DEMUXER 0 ++#define CONFIG_LUODAT_DEMUXER 0 ++#define CONFIG_LRC_DEMUXER 0 ++#define CONFIG_LVF_DEMUXER 0 ++#define CONFIG_LXF_DEMUXER 0 ++#define CONFIG_M4V_DEMUXER 0 ++#define CONFIG_MCA_DEMUXER 0 ++#define CONFIG_MCC_DEMUXER 0 ++#define CONFIG_MATROSKA_DEMUXER 1 ++#define CONFIG_MGSTS_DEMUXER 0 ++#define CONFIG_MICRODVD_DEMUXER 0 ++#define 
CONFIG_MJPEG_DEMUXER 0 ++#define CONFIG_MJPEG_2000_DEMUXER 0 ++#define CONFIG_MLP_DEMUXER 0 ++#define CONFIG_MLV_DEMUXER 0 ++#define CONFIG_MM_DEMUXER 0 ++#define CONFIG_MMF_DEMUXER 0 ++#define CONFIG_MODS_DEMUXER 0 ++#define CONFIG_MOFLEX_DEMUXER 0 ++#define CONFIG_MOV_DEMUXER 1 ++#define CONFIG_MP3_DEMUXER 1 ++#define CONFIG_MPC_DEMUXER 0 ++#define CONFIG_MPC8_DEMUXER 0 ++#define CONFIG_MPEGPS_DEMUXER 0 ++#define CONFIG_MPEGTS_DEMUXER 0 ++#define CONFIG_MPEGTSRAW_DEMUXER 0 ++#define CONFIG_MPEGVIDEO_DEMUXER 0 ++#define CONFIG_MPJPEG_DEMUXER 0 ++#define CONFIG_MPL2_DEMUXER 0 ++#define CONFIG_MPSUB_DEMUXER 0 ++#define CONFIG_MSF_DEMUXER 0 ++#define CONFIG_MSNWC_TCP_DEMUXER 0 ++#define CONFIG_MSP_DEMUXER 0 ++#define CONFIG_MTAF_DEMUXER 0 ++#define CONFIG_MTV_DEMUXER 0 ++#define CONFIG_MUSX_DEMUXER 0 ++#define CONFIG_MV_DEMUXER 0 ++#define CONFIG_MVI_DEMUXER 0 ++#define CONFIG_MXF_DEMUXER 0 ++#define CONFIG_MXG_DEMUXER 0 ++#define CONFIG_NC_DEMUXER 0 ++#define CONFIG_NISTSPHERE_DEMUXER 0 ++#define CONFIG_NSP_DEMUXER 0 ++#define CONFIG_NSV_DEMUXER 0 ++#define CONFIG_NUT_DEMUXER 0 ++#define CONFIG_NUV_DEMUXER 0 ++#define CONFIG_OBU_DEMUXER 0 ++#define CONFIG_OGG_DEMUXER 1 ++#define CONFIG_OMA_DEMUXER 0 ++#define CONFIG_OSQ_DEMUXER 0 ++#define CONFIG_PAF_DEMUXER 0 ++#define CONFIG_PCM_ALAW_DEMUXER 0 ++#define CONFIG_PCM_MULAW_DEMUXER 0 ++#define CONFIG_PCM_VIDC_DEMUXER 0 ++#define CONFIG_PCM_F64BE_DEMUXER 0 ++#define CONFIG_PCM_F64LE_DEMUXER 0 ++#define CONFIG_PCM_F32BE_DEMUXER 0 ++#define CONFIG_PCM_F32LE_DEMUXER 0 ++#define CONFIG_PCM_S32BE_DEMUXER 0 ++#define CONFIG_PCM_S32LE_DEMUXER 0 ++#define CONFIG_PCM_S24BE_DEMUXER 0 ++#define CONFIG_PCM_S24LE_DEMUXER 0 ++#define CONFIG_PCM_S16BE_DEMUXER 0 ++#define CONFIG_PCM_S16LE_DEMUXER 0 ++#define CONFIG_PCM_S8_DEMUXER 0 ++#define CONFIG_PCM_U32BE_DEMUXER 0 ++#define CONFIG_PCM_U32LE_DEMUXER 0 ++#define CONFIG_PCM_U24BE_DEMUXER 0 ++#define CONFIG_PCM_U24LE_DEMUXER 0 ++#define CONFIG_PCM_U16BE_DEMUXER 0 ++#define 
CONFIG_PCM_U16LE_DEMUXER 0 ++#define CONFIG_PCM_U8_DEMUXER 0 ++#define CONFIG_PDV_DEMUXER 0 ++#define CONFIG_PJS_DEMUXER 0 ++#define CONFIG_PMP_DEMUXER 0 ++#define CONFIG_PP_BNK_DEMUXER 0 ++#define CONFIG_PVA_DEMUXER 0 ++#define CONFIG_PVF_DEMUXER 0 ++#define CONFIG_QCP_DEMUXER 0 ++#define CONFIG_R3D_DEMUXER 0 ++#define CONFIG_RAWVIDEO_DEMUXER 0 ++#define CONFIG_REALTEXT_DEMUXER 0 ++#define CONFIG_REDSPARK_DEMUXER 0 ++#define CONFIG_RKA_DEMUXER 0 ++#define CONFIG_RL2_DEMUXER 0 ++#define CONFIG_RM_DEMUXER 0 ++#define CONFIG_ROQ_DEMUXER 0 ++#define CONFIG_RPL_DEMUXER 0 ++#define CONFIG_RSD_DEMUXER 0 ++#define CONFIG_RSO_DEMUXER 0 ++#define CONFIG_RTP_DEMUXER 0 ++#define CONFIG_RTSP_DEMUXER 0 ++#define CONFIG_S337M_DEMUXER 0 ++#define CONFIG_SAMI_DEMUXER 0 ++#define CONFIG_SAP_DEMUXER 0 ++#define CONFIG_SBC_DEMUXER 0 ++#define CONFIG_SBG_DEMUXER 0 ++#define CONFIG_SCC_DEMUXER 0 ++#define CONFIG_SCD_DEMUXER 0 ++#define CONFIG_SDNS_DEMUXER 0 ++#define CONFIG_SDP_DEMUXER 0 ++#define CONFIG_SDR2_DEMUXER 0 ++#define CONFIG_SDS_DEMUXER 0 ++#define CONFIG_SDX_DEMUXER 0 ++#define CONFIG_SEGAFILM_DEMUXER 0 ++#define CONFIG_SER_DEMUXER 0 ++#define CONFIG_SGA_DEMUXER 0 ++#define CONFIG_SHORTEN_DEMUXER 0 ++#define CONFIG_SIFF_DEMUXER 0 ++#define CONFIG_SIMBIOSIS_IMX_DEMUXER 0 ++#define CONFIG_SLN_DEMUXER 0 ++#define CONFIG_SMACKER_DEMUXER 0 ++#define CONFIG_SMJPEG_DEMUXER 0 ++#define CONFIG_SMUSH_DEMUXER 0 ++#define CONFIG_SOL_DEMUXER 0 ++#define CONFIG_SOX_DEMUXER 0 ++#define CONFIG_SPDIF_DEMUXER 0 ++#define CONFIG_SRT_DEMUXER 0 ++#define CONFIG_STR_DEMUXER 0 ++#define CONFIG_STL_DEMUXER 0 ++#define CONFIG_SUBVIEWER1_DEMUXER 0 ++#define CONFIG_SUBVIEWER_DEMUXER 0 ++#define CONFIG_SUP_DEMUXER 0 ++#define CONFIG_SVAG_DEMUXER 0 ++#define CONFIG_SVS_DEMUXER 0 ++#define CONFIG_SWF_DEMUXER 0 ++#define CONFIG_TAK_DEMUXER 0 ++#define CONFIG_TEDCAPTIONS_DEMUXER 0 ++#define CONFIG_THP_DEMUXER 0 ++#define CONFIG_THREEDOSTR_DEMUXER 0 ++#define CONFIG_TIERTEXSEQ_DEMUXER 0 ++#define 
CONFIG_TMV_DEMUXER 0 ++#define CONFIG_TRUEHD_DEMUXER 0 ++#define CONFIG_TTA_DEMUXER 0 ++#define CONFIG_TXD_DEMUXER 0 ++#define CONFIG_TTY_DEMUXER 0 ++#define CONFIG_TY_DEMUXER 0 ++#define CONFIG_USM_DEMUXER 0 ++#define CONFIG_V210_DEMUXER 0 ++#define CONFIG_V210X_DEMUXER 0 ++#define CONFIG_VAG_DEMUXER 0 ++#define CONFIG_VC1_DEMUXER 0 ++#define CONFIG_VC1T_DEMUXER 0 ++#define CONFIG_VIVIDAS_DEMUXER 0 ++#define CONFIG_VIVO_DEMUXER 0 ++#define CONFIG_VMD_DEMUXER 0 ++#define CONFIG_VOBSUB_DEMUXER 0 ++#define CONFIG_VOC_DEMUXER 0 ++#define CONFIG_VPK_DEMUXER 0 ++#define CONFIG_VPLAYER_DEMUXER 0 ++#define CONFIG_VQF_DEMUXER 0 ++#define CONFIG_VVC_DEMUXER 0 ++#define CONFIG_W64_DEMUXER 0 ++#define CONFIG_WADY_DEMUXER 0 ++#define CONFIG_WAVARC_DEMUXER 0 ++#define CONFIG_WAV_DEMUXER 1 ++#define CONFIG_WC3_DEMUXER 0 ++#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0 ++#define CONFIG_WEBVTT_DEMUXER 0 ++#define CONFIG_WSAUD_DEMUXER 0 ++#define CONFIG_WSD_DEMUXER 0 ++#define CONFIG_WSVQA_DEMUXER 0 ++#define CONFIG_WTV_DEMUXER 0 ++#define CONFIG_WVE_DEMUXER 0 ++#define CONFIG_WV_DEMUXER 0 ++#define CONFIG_XA_DEMUXER 0 ++#define CONFIG_XBIN_DEMUXER 0 ++#define CONFIG_XMD_DEMUXER 0 ++#define CONFIG_XMV_DEMUXER 0 ++#define CONFIG_XVAG_DEMUXER 0 ++#define CONFIG_XWMA_DEMUXER 0 ++#define CONFIG_YOP_DEMUXER 0 ++#define CONFIG_YUV4MPEGPIPE_DEMUXER 0 ++#define CONFIG_IMAGE_BMP_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_CRI_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_DDS_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_GEM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_GIF_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_HDR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEGXL_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0 ++#define 
CONFIG_IMAGE_PFM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PHM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PHOTOCD_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PPM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PSD_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_QDRAW_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_QOI_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SVG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_VBN_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XBM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XPM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XWD_PIPE_DEMUXER 0 ++#define CONFIG_LIBGME_DEMUXER 0 ++#define CONFIG_LIBMODPLUG_DEMUXER 0 ++#define CONFIG_LIBOPENMPT_DEMUXER 0 ++#define CONFIG_VAPOURSYNTH_DEMUXER 0 ++#define CONFIG_A64_MUXER 0 ++#define CONFIG_AC3_MUXER 0 ++#define CONFIG_AC4_MUXER 0 ++#define CONFIG_ADTS_MUXER 0 ++#define CONFIG_ADX_MUXER 0 ++#define CONFIG_AIFF_MUXER 0 ++#define CONFIG_ALP_MUXER 0 ++#define CONFIG_AMR_MUXER 0 ++#define CONFIG_AMV_MUXER 0 ++#define CONFIG_APM_MUXER 0 ++#define CONFIG_APNG_MUXER 0 ++#define CONFIG_APTX_MUXER 0 ++#define CONFIG_APTX_HD_MUXER 0 ++#define CONFIG_ARGO_ASF_MUXER 0 ++#define CONFIG_ARGO_CVG_MUXER 0 ++#define CONFIG_ASF_MUXER 0 ++#define CONFIG_ASS_MUXER 0 ++#define CONFIG_AST_MUXER 0 ++#define CONFIG_ASF_STREAM_MUXER 0 ++#define CONFIG_AU_MUXER 0 ++#define CONFIG_AVI_MUXER 0 ++#define CONFIG_AVIF_MUXER 0 ++#define CONFIG_AVM2_MUXER 0 ++#define CONFIG_AVS2_MUXER 0 ++#define CONFIG_AVS3_MUXER 0 ++#define CONFIG_BIT_MUXER 0 ++#define CONFIG_CAF_MUXER 0 ++#define CONFIG_CAVSVIDEO_MUXER 0 ++#define CONFIG_CODEC2_MUXER 0 ++#define CONFIG_CODEC2RAW_MUXER 0 ++#define CONFIG_CRC_MUXER 0 ++#define 
CONFIG_DASH_MUXER 0 ++#define CONFIG_DATA_MUXER 0 ++#define CONFIG_DAUD_MUXER 0 ++#define CONFIG_DFPWM_MUXER 0 ++#define CONFIG_DIRAC_MUXER 0 ++#define CONFIG_DNXHD_MUXER 0 ++#define CONFIG_DTS_MUXER 0 ++#define CONFIG_DV_MUXER 0 ++#define CONFIG_EAC3_MUXER 0 ++#define CONFIG_EVC_MUXER 0 ++#define CONFIG_F4V_MUXER 0 ++#define CONFIG_FFMETADATA_MUXER 0 ++#define CONFIG_FIFO_MUXER 0 ++#define CONFIG_FIFO_TEST_MUXER 0 ++#define CONFIG_FILMSTRIP_MUXER 0 ++#define CONFIG_FITS_MUXER 0 ++#define CONFIG_FLAC_MUXER 0 ++#define CONFIG_FLV_MUXER 0 ++#define CONFIG_FRAMECRC_MUXER 0 ++#define CONFIG_FRAMEHASH_MUXER 0 ++#define CONFIG_FRAMEMD5_MUXER 0 ++#define CONFIG_G722_MUXER 0 ++#define CONFIG_G723_1_MUXER 0 ++#define CONFIG_G726_MUXER 0 ++#define CONFIG_G726LE_MUXER 0 ++#define CONFIG_GIF_MUXER 0 ++#define CONFIG_GSM_MUXER 0 ++#define CONFIG_GXF_MUXER 0 ++#define CONFIG_H261_MUXER 0 ++#define CONFIG_H263_MUXER 0 ++#define CONFIG_H264_MUXER 0 ++#define CONFIG_HASH_MUXER 0 ++#define CONFIG_HDS_MUXER 0 ++#define CONFIG_HEVC_MUXER 0 ++#define CONFIG_HLS_MUXER 0 ++#define CONFIG_ICO_MUXER 0 ++#define CONFIG_ILBC_MUXER 0 ++#define CONFIG_IMAGE2_MUXER 0 ++#define CONFIG_IMAGE2PIPE_MUXER 0 ++#define CONFIG_IPOD_MUXER 0 ++#define CONFIG_IRCAM_MUXER 0 ++#define CONFIG_ISMV_MUXER 0 ++#define CONFIG_IVF_MUXER 0 ++#define CONFIG_JACOSUB_MUXER 0 ++#define CONFIG_KVAG_MUXER 0 ++#define CONFIG_LATM_MUXER 0 ++#define CONFIG_LRC_MUXER 0 ++#define CONFIG_M4V_MUXER 0 ++#define CONFIG_MD5_MUXER 0 ++#define CONFIG_MATROSKA_MUXER 0 ++#define CONFIG_MATROSKA_AUDIO_MUXER 0 ++#define CONFIG_MICRODVD_MUXER 0 ++#define CONFIG_MJPEG_MUXER 0 ++#define CONFIG_MLP_MUXER 0 ++#define CONFIG_MMF_MUXER 0 ++#define CONFIG_MOV_MUXER 0 ++#define CONFIG_MP2_MUXER 0 ++#define CONFIG_MP3_MUXER 0 ++#define CONFIG_MP4_MUXER 0 ++#define CONFIG_MPEG1SYSTEM_MUXER 0 ++#define CONFIG_MPEG1VCD_MUXER 0 ++#define CONFIG_MPEG1VIDEO_MUXER 0 ++#define CONFIG_MPEG2DVD_MUXER 0 ++#define CONFIG_MPEG2SVCD_MUXER 0 ++#define 
CONFIG_MPEG2VIDEO_MUXER 0 ++#define CONFIG_MPEG2VOB_MUXER 0 ++#define CONFIG_MPEGTS_MUXER 0 ++#define CONFIG_MPJPEG_MUXER 0 ++#define CONFIG_MXF_MUXER 0 ++#define CONFIG_MXF_D10_MUXER 0 ++#define CONFIG_MXF_OPATOM_MUXER 0 ++#define CONFIG_NULL_MUXER 0 ++#define CONFIG_NUT_MUXER 0 ++#define CONFIG_OBU_MUXER 0 ++#define CONFIG_OGA_MUXER 0 ++#define CONFIG_OGG_MUXER 0 ++#define CONFIG_OGV_MUXER 0 ++#define CONFIG_OMA_MUXER 0 ++#define CONFIG_OPUS_MUXER 0 ++#define CONFIG_PCM_ALAW_MUXER 0 ++#define CONFIG_PCM_MULAW_MUXER 0 ++#define CONFIG_PCM_VIDC_MUXER 0 ++#define CONFIG_PCM_F64BE_MUXER 0 ++#define CONFIG_PCM_F64LE_MUXER 0 ++#define CONFIG_PCM_F32BE_MUXER 0 ++#define CONFIG_PCM_F32LE_MUXER 0 ++#define CONFIG_PCM_S32BE_MUXER 0 ++#define CONFIG_PCM_S32LE_MUXER 0 ++#define CONFIG_PCM_S24BE_MUXER 0 ++#define CONFIG_PCM_S24LE_MUXER 0 ++#define CONFIG_PCM_S16BE_MUXER 0 ++#define CONFIG_PCM_S16LE_MUXER 0 ++#define CONFIG_PCM_S8_MUXER 0 ++#define CONFIG_PCM_U32BE_MUXER 0 ++#define CONFIG_PCM_U32LE_MUXER 0 ++#define CONFIG_PCM_U24BE_MUXER 0 ++#define CONFIG_PCM_U24LE_MUXER 0 ++#define CONFIG_PCM_U16BE_MUXER 0 ++#define CONFIG_PCM_U16LE_MUXER 0 ++#define CONFIG_PCM_U8_MUXER 0 ++#define CONFIG_PSP_MUXER 0 ++#define CONFIG_RAWVIDEO_MUXER 0 ++#define CONFIG_RM_MUXER 0 ++#define CONFIG_ROQ_MUXER 0 ++#define CONFIG_RSO_MUXER 0 ++#define CONFIG_RTP_MUXER 0 ++#define CONFIG_RTP_MPEGTS_MUXER 0 ++#define CONFIG_RTSP_MUXER 0 ++#define CONFIG_SAP_MUXER 0 ++#define CONFIG_SBC_MUXER 0 ++#define CONFIG_SCC_MUXER 0 ++#define CONFIG_SEGAFILM_MUXER 0 ++#define CONFIG_SEGMENT_MUXER 0 ++#define CONFIG_STREAM_SEGMENT_MUXER 0 ++#define CONFIG_SMJPEG_MUXER 0 ++#define CONFIG_SMOOTHSTREAMING_MUXER 0 ++#define CONFIG_SOX_MUXER 0 ++#define CONFIG_SPX_MUXER 0 ++#define CONFIG_SPDIF_MUXER 0 ++#define CONFIG_SRT_MUXER 0 ++#define CONFIG_STREAMHASH_MUXER 0 ++#define CONFIG_SUP_MUXER 0 ++#define CONFIG_SWF_MUXER 0 ++#define CONFIG_TEE_MUXER 0 ++#define CONFIG_TG2_MUXER 0 ++#define CONFIG_TGP_MUXER 0 
++#define CONFIG_MKVTIMESTAMP_V2_MUXER 0 ++#define CONFIG_TRUEHD_MUXER 0 ++#define CONFIG_TTA_MUXER 0 ++#define CONFIG_TTML_MUXER 0 ++#define CONFIG_UNCODEDFRAMECRC_MUXER 0 ++#define CONFIG_VC1_MUXER 0 ++#define CONFIG_VC1T_MUXER 0 ++#define CONFIG_VOC_MUXER 0 ++#define CONFIG_VVC_MUXER 0 ++#define CONFIG_W64_MUXER 0 ++#define CONFIG_WAV_MUXER 0 ++#define CONFIG_WEBM_MUXER 0 ++#define CONFIG_WEBM_DASH_MANIFEST_MUXER 0 ++#define CONFIG_WEBM_CHUNK_MUXER 0 ++#define CONFIG_WEBP_MUXER 0 ++#define CONFIG_WEBVTT_MUXER 0 ++#define CONFIG_WSAUD_MUXER 0 ++#define CONFIG_WTV_MUXER 0 ++#define CONFIG_WV_MUXER 0 ++#define CONFIG_YUV4MPEGPIPE_MUXER 0 ++#define CONFIG_CHROMAPRINT_MUXER 0 ++#define CONFIG_ASYNC_PROTOCOL 0 ++#define CONFIG_BLURAY_PROTOCOL 0 ++#define CONFIG_CACHE_PROTOCOL 0 ++#define CONFIG_CONCAT_PROTOCOL 0 ++#define CONFIG_CONCATF_PROTOCOL 0 ++#define CONFIG_CRYPTO_PROTOCOL 0 ++#define CONFIG_DATA_PROTOCOL 0 ++#define CONFIG_FD_PROTOCOL 0 ++#define CONFIG_FFRTMPCRYPT_PROTOCOL 0 ++#define CONFIG_FFRTMPHTTP_PROTOCOL 0 ++#define CONFIG_FILE_PROTOCOL 0 ++#define CONFIG_FTP_PROTOCOL 0 ++#define CONFIG_GOPHER_PROTOCOL 0 ++#define CONFIG_GOPHERS_PROTOCOL 0 ++#define CONFIG_HLS_PROTOCOL 0 ++#define CONFIG_HTTP_PROTOCOL 0 ++#define CONFIG_HTTPPROXY_PROTOCOL 0 ++#define CONFIG_HTTPS_PROTOCOL 0 ++#define CONFIG_ICECAST_PROTOCOL 0 ++#define CONFIG_MMSH_PROTOCOL 0 ++#define CONFIG_MMST_PROTOCOL 0 ++#define CONFIG_MD5_PROTOCOL 0 ++#define CONFIG_PIPE_PROTOCOL 0 ++#define CONFIG_PROMPEG_PROTOCOL 0 ++#define CONFIG_RTMP_PROTOCOL 0 ++#define CONFIG_RTMPE_PROTOCOL 0 ++#define CONFIG_RTMPS_PROTOCOL 0 ++#define CONFIG_RTMPT_PROTOCOL 0 ++#define CONFIG_RTMPTE_PROTOCOL 0 ++#define CONFIG_RTMPTS_PROTOCOL 0 ++#define CONFIG_RTP_PROTOCOL 0 ++#define CONFIG_SCTP_PROTOCOL 0 ++#define CONFIG_SRTP_PROTOCOL 0 ++#define CONFIG_SUBFILE_PROTOCOL 0 ++#define CONFIG_TEE_PROTOCOL 0 ++#define CONFIG_TCP_PROTOCOL 0 ++#define CONFIG_TLS_PROTOCOL 0 ++#define CONFIG_UDP_PROTOCOL 0 ++#define 
CONFIG_UDPLITE_PROTOCOL 0 ++#define CONFIG_UNIX_PROTOCOL 0 ++#define CONFIG_LIBAMQP_PROTOCOL 0 ++#define CONFIG_LIBRIST_PROTOCOL 0 ++#define CONFIG_LIBRTMP_PROTOCOL 0 ++#define CONFIG_LIBRTMPE_PROTOCOL 0 ++#define CONFIG_LIBRTMPS_PROTOCOL 0 ++#define CONFIG_LIBRTMPT_PROTOCOL 0 ++#define CONFIG_LIBRTMPTE_PROTOCOL 0 ++#define CONFIG_LIBSRT_PROTOCOL 0 ++#define CONFIG_LIBSSH_PROTOCOL 0 ++#define CONFIG_LIBSMBCLIENT_PROTOCOL 0 ++#define CONFIG_LIBZMQ_PROTOCOL 0 ++#define CONFIG_IPFS_GATEWAY_PROTOCOL 0 ++#define CONFIG_IPNS_GATEWAY_PROTOCOL 0 ++#endif /* FFMPEG_CONFIG_COMPONENTS_H */ +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavcodec/bsf_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavcodec/bsf_list.c +@@ -0,0 +1,2 @@ ++static const FFBitStreamFilter * const bitstream_filters[] = { ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavcodec/codec_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavcodec/codec_list.c +@@ -0,0 +1,20 @@ ++static const FFCodec * const codec_list[] = { ++ &ff_h264_decoder, ++ &ff_theora_decoder, ++ &ff_vp3_decoder, ++ &ff_vp8_decoder, ++ &ff_aac_decoder, ++ &ff_flac_decoder, ++ &ff_mp3_decoder, ++ &ff_vorbis_decoder, ++ &ff_pcm_alaw_decoder, ++ &ff_pcm_f32le_decoder, ++ &ff_pcm_mulaw_decoder, ++ &ff_pcm_s16be_decoder, ++ &ff_pcm_s16le_decoder, ++ &ff_pcm_s24be_decoder, ++ &ff_pcm_s24le_decoder, ++ &ff_pcm_s32le_decoder, ++ &ff_pcm_u8_decoder, ++ &ff_libopus_decoder, ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavcodec/parser_list.c +=================================================================== +--- /dev/null ++++ 
chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavcodec/parser_list.c +@@ -0,0 +1,11 @@ ++static const AVCodecParser * const parser_list[] = { ++ &ff_aac_parser, ++ &ff_flac_parser, ++ &ff_h264_parser, ++ &ff_mpegaudio_parser, ++ &ff_opus_parser, ++ &ff_vorbis_parser, ++ &ff_vp3_parser, ++ &ff_vp8_parser, ++ &ff_vp9_parser, ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavformat/demuxer_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavformat/demuxer_list.c +@@ -0,0 +1,9 @@ ++static const AVInputFormat * const demuxer_list[] = { ++ &ff_aac_demuxer, ++ &ff_flac_demuxer, ++ &ff_matroska_demuxer, ++ &ff_mov_demuxer, ++ &ff_mp3_demuxer, ++ &ff_ogg_demuxer, ++ &ff_wav_demuxer, ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavformat/muxer_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavformat/muxer_list.c +@@ -0,0 +1,2 @@ ++static const FFOutputFormat * const muxer_list[] = { ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavformat/protocol_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavformat/protocol_list.c +@@ -0,0 +1,2 @@ ++static const URLProtocol * const url_protocols[] = { ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavutil/avconfig.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavutil/avconfig.h +@@ -0,0 +1,6 @@ ++/* 
Generated by ffmpeg configure */ ++#ifndef AVUTIL_AVCONFIG_H ++#define AVUTIL_AVCONFIG_H ++#define AV_HAVE_BIGENDIAN 0 ++#define AV_HAVE_FAST_UNALIGNED 1 ++#endif /* AVUTIL_AVCONFIG_H */ +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavutil/ffversion.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chrome/linux/ppc64/libavutil/ffversion.h +@@ -0,0 +1,5 @@ ++/* Automatically generated by version.sh, do not manually edit! */ ++#ifndef AVUTIL_FFVERSION_H ++#define AVUTIL_FFVERSION_H ++#define FFMPEG_VERSION "5.1.git" ++#endif /* AVUTIL_FFVERSION_H */ +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/config.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/config.h +@@ -0,0 +1,760 @@ ++/* Automatically generated by configure - do not modify! 
*/ ++#ifndef FFMPEG_CONFIG_H ++#define FFMPEG_CONFIG_H ++/* #define FFMPEG_CONFIGURATION "--disable-everything --disable-all --disable-doc --disable-htmlpages --disable-manpages --disable-podpages --disable-txtpages --disable-static --enable-avcodec --enable-avformat --enable-avutil --enable-static --enable-libopus --disable-debug --disable-bzlib --disable-iconv --disable-network --disable-schannel --disable-sdl2 --disable-symver --disable-xlib --disable-zlib --disable-securetransport --disable-faan --disable-alsa --disable-autodetect --enable-decoder='vorbis,libopus,flac' --enable-decoder='pcm_u8,pcm_s16le,pcm_s24le,pcm_s32le,pcm_f32le,mp3' --enable-decoder='pcm_s16be,pcm_s24be,pcm_mulaw,pcm_alaw' --enable-demuxer='ogg,matroska,wav,flac,mp3,mov' --enable-parser='opus,vorbis,flac,mpegaudio,vp9' --extra-cflags=-I/CHROMIUM_REBUILD/CHROMIUM_120/NEW/chromium-120.0.6099.62/third_party/opus/src/include --disable-linux-perf --x86asmexe=nasm --optflags='\"-O2\"' --enable-decoder='theora,vp8' --enable-parser='vp3,vp8' --arch=ppc64le --extra-cflags='-mcpu=power8' --enable-pic --cc=clang --cxx=clang++ --ld=clang --extra-ldflags='-fuse-ld=lld' --enable-decoder='aac,h264' --enable-demuxer=aac --enable-parser='aac,h264' --enable-decoder=mpeg4 --enable-parser='h263,mpeg4video' --enable-demuxer=avi" -- elide long configuration string from binary */ ++#define FFMPEG_LICENSE "LGPL version 2.1 or later" ++#define CONFIG_THIS_YEAR 2023 ++#define FFMPEG_DATADIR "/usr/local/share/ffmpeg" ++#define AVCONV_DATADIR "/usr/local/share/ffmpeg" ++#define CC_IDENT "Debian clang version 16.0.6 (19)" ++#define OS_NAME linux ++#define av_restrict restrict ++#define EXTERN_PREFIX "" ++#define EXTERN_ASM ++#define BUILDSUF "" ++#define SLIBSUF ".so" ++#define HAVE_MMX2 HAVE_MMXEXT ++#define SWS_MAX_FILTER_SIZE 256 ++#define ARCH_AARCH64 0 ++#define ARCH_ALPHA 0 ++#define ARCH_ARM 0 ++#define ARCH_AVR32 0 ++#define ARCH_AVR32_AP 0 ++#define ARCH_AVR32_UC 0 ++#define ARCH_BFIN 0 ++#define ARCH_IA64 0 
++#define ARCH_LOONGARCH 0 ++#define ARCH_LOONGARCH32 0 ++#define ARCH_LOONGARCH64 0 ++#define ARCH_M68K 0 ++#define ARCH_MIPS 0 ++#define ARCH_MIPS64 0 ++#define ARCH_PARISC 0 ++#define ARCH_PPC 1 ++#define ARCH_PPC64 1 ++#define ARCH_RISCV 0 ++#define ARCH_S390 0 ++#define ARCH_SH4 0 ++#define ARCH_SPARC 0 ++#define ARCH_SPARC64 0 ++#define ARCH_TILEGX 0 ++#define ARCH_TILEPRO 0 ++#define ARCH_X86 0 ++#define ARCH_X86_32 0 ++#define ARCH_X86_64 0 ++#define HAVE_ARMV5TE 0 ++#define HAVE_ARMV6 0 ++#define HAVE_ARMV6T2 0 ++#define HAVE_ARMV8 0 ++#define HAVE_DOTPROD 0 ++#define HAVE_I8MM 0 ++#define HAVE_NEON 0 ++#define HAVE_VFP 0 ++#define HAVE_VFPV3 0 ++#define HAVE_SETEND 0 ++#define HAVE_ALTIVEC 1 ++#define HAVE_DCBZL 1 ++#define HAVE_LDBRX 0 ++#define HAVE_POWER8 1 ++#define HAVE_PPC4XX 0 ++#define HAVE_VSX 1 ++#define HAVE_RVV 0 ++#define HAVE_AESNI 0 ++#define HAVE_AMD3DNOW 0 ++#define HAVE_AMD3DNOWEXT 0 ++#define HAVE_AVX 0 ++#define HAVE_AVX2 0 ++#define HAVE_AVX512 0 ++#define HAVE_AVX512ICL 0 ++#define HAVE_FMA3 0 ++#define HAVE_FMA4 0 ++#define HAVE_MMX 0 ++#define HAVE_MMXEXT 0 ++#define HAVE_SSE 0 ++#define HAVE_SSE2 0 ++#define HAVE_SSE3 0 ++#define HAVE_SSE4 0 ++#define HAVE_SSE42 0 ++#define HAVE_SSSE3 0 ++#define HAVE_XOP 0 ++#define HAVE_CPUNOP 0 ++#define HAVE_I686 0 ++#define HAVE_MIPSFPU 0 ++#define HAVE_MIPS32R2 0 ++#define HAVE_MIPS32R5 0 ++#define HAVE_MIPS64R2 0 ++#define HAVE_MIPS32R6 0 ++#define HAVE_MIPS64R6 0 ++#define HAVE_MIPSDSP 0 ++#define HAVE_MIPSDSPR2 0 ++#define HAVE_MSA 0 ++#define HAVE_LOONGSON2 0 ++#define HAVE_LOONGSON3 0 ++#define HAVE_MMI 0 ++#define HAVE_LSX 0 ++#define HAVE_LASX 0 ++#define HAVE_ARMV5TE_EXTERNAL 0 ++#define HAVE_ARMV6_EXTERNAL 0 ++#define HAVE_ARMV6T2_EXTERNAL 0 ++#define HAVE_ARMV8_EXTERNAL 0 ++#define HAVE_DOTPROD_EXTERNAL 0 ++#define HAVE_I8MM_EXTERNAL 0 ++#define HAVE_NEON_EXTERNAL 0 ++#define HAVE_VFP_EXTERNAL 0 ++#define HAVE_VFPV3_EXTERNAL 0 ++#define HAVE_SETEND_EXTERNAL 0 ++#define 
HAVE_ALTIVEC_EXTERNAL 0 ++#define HAVE_DCBZL_EXTERNAL 0 ++#define HAVE_LDBRX_EXTERNAL 0 ++#define HAVE_POWER8_EXTERNAL 0 ++#define HAVE_PPC4XX_EXTERNAL 0 ++#define HAVE_VSX_EXTERNAL 0 ++#define HAVE_RVV_EXTERNAL 0 ++#define HAVE_AESNI_EXTERNAL 0 ++#define HAVE_AMD3DNOW_EXTERNAL 0 ++#define HAVE_AMD3DNOWEXT_EXTERNAL 0 ++#define HAVE_AVX_EXTERNAL 0 ++#define HAVE_AVX2_EXTERNAL 0 ++#define HAVE_AVX512_EXTERNAL 0 ++#define HAVE_AVX512ICL_EXTERNAL 0 ++#define HAVE_FMA3_EXTERNAL 0 ++#define HAVE_FMA4_EXTERNAL 0 ++#define HAVE_MMX_EXTERNAL 0 ++#define HAVE_MMXEXT_EXTERNAL 0 ++#define HAVE_SSE_EXTERNAL 0 ++#define HAVE_SSE2_EXTERNAL 0 ++#define HAVE_SSE3_EXTERNAL 0 ++#define HAVE_SSE4_EXTERNAL 0 ++#define HAVE_SSE42_EXTERNAL 0 ++#define HAVE_SSSE3_EXTERNAL 0 ++#define HAVE_XOP_EXTERNAL 0 ++#define HAVE_CPUNOP_EXTERNAL 0 ++#define HAVE_I686_EXTERNAL 0 ++#define HAVE_MIPSFPU_EXTERNAL 0 ++#define HAVE_MIPS32R2_EXTERNAL 0 ++#define HAVE_MIPS32R5_EXTERNAL 0 ++#define HAVE_MIPS64R2_EXTERNAL 0 ++#define HAVE_MIPS32R6_EXTERNAL 0 ++#define HAVE_MIPS64R6_EXTERNAL 0 ++#define HAVE_MIPSDSP_EXTERNAL 0 ++#define HAVE_MIPSDSPR2_EXTERNAL 0 ++#define HAVE_MSA_EXTERNAL 0 ++#define HAVE_LOONGSON2_EXTERNAL 0 ++#define HAVE_LOONGSON3_EXTERNAL 0 ++#define HAVE_MMI_EXTERNAL 0 ++#define HAVE_LSX_EXTERNAL 0 ++#define HAVE_LASX_EXTERNAL 0 ++#define HAVE_ARMV5TE_INLINE 0 ++#define HAVE_ARMV6_INLINE 0 ++#define HAVE_ARMV6T2_INLINE 0 ++#define HAVE_ARMV8_INLINE 0 ++#define HAVE_DOTPROD_INLINE 0 ++#define HAVE_I8MM_INLINE 0 ++#define HAVE_NEON_INLINE 0 ++#define HAVE_VFP_INLINE 0 ++#define HAVE_VFPV3_INLINE 0 ++#define HAVE_SETEND_INLINE 0 ++#define HAVE_ALTIVEC_INLINE 0 ++#define HAVE_DCBZL_INLINE 0 ++#define HAVE_LDBRX_INLINE 0 ++#define HAVE_POWER8_INLINE 0 ++#define HAVE_PPC4XX_INLINE 0 ++#define HAVE_VSX_INLINE 0 ++#define HAVE_RVV_INLINE 0 ++#define HAVE_AESNI_INLINE 0 ++#define HAVE_AMD3DNOW_INLINE 0 ++#define HAVE_AMD3DNOWEXT_INLINE 0 ++#define HAVE_AVX_INLINE 0 ++#define HAVE_AVX2_INLINE 0 
++#define HAVE_AVX512_INLINE 0 ++#define HAVE_AVX512ICL_INLINE 0 ++#define HAVE_FMA3_INLINE 0 ++#define HAVE_FMA4_INLINE 0 ++#define HAVE_MMX_INLINE 0 ++#define HAVE_MMXEXT_INLINE 0 ++#define HAVE_SSE_INLINE 0 ++#define HAVE_SSE2_INLINE 0 ++#define HAVE_SSE3_INLINE 0 ++#define HAVE_SSE4_INLINE 0 ++#define HAVE_SSE42_INLINE 0 ++#define HAVE_SSSE3_INLINE 0 ++#define HAVE_XOP_INLINE 0 ++#define HAVE_CPUNOP_INLINE 0 ++#define HAVE_I686_INLINE 0 ++#define HAVE_MIPSFPU_INLINE 0 ++#define HAVE_MIPS32R2_INLINE 0 ++#define HAVE_MIPS32R5_INLINE 0 ++#define HAVE_MIPS64R2_INLINE 0 ++#define HAVE_MIPS32R6_INLINE 0 ++#define HAVE_MIPS64R6_INLINE 0 ++#define HAVE_MIPSDSP_INLINE 0 ++#define HAVE_MIPSDSPR2_INLINE 0 ++#define HAVE_MSA_INLINE 0 ++#define HAVE_LOONGSON2_INLINE 0 ++#define HAVE_LOONGSON3_INLINE 0 ++#define HAVE_MMI_INLINE 0 ++#define HAVE_LSX_INLINE 0 ++#define HAVE_LASX_INLINE 0 ++#define HAVE_ALIGNED_STACK 1 ++#define HAVE_FAST_64BIT 1 ++#define HAVE_FAST_CLZ 1 ++#define HAVE_FAST_CMOV 0 ++#define HAVE_FAST_FLOAT16 0 ++#define HAVE_LOCAL_ALIGNED 1 ++#define HAVE_SIMD_ALIGN_16 1 ++#define HAVE_SIMD_ALIGN_32 0 ++#define HAVE_SIMD_ALIGN_64 0 ++#define HAVE_ATOMIC_CAS_PTR 0 ++#define HAVE_MACHINE_RW_BARRIER 0 ++#define HAVE_MEMORYBARRIER 0 ++#define HAVE_MM_EMPTY 0 ++#define HAVE_RDTSC 0 ++#define HAVE_SEM_TIMEDWAIT 1 ++#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1 ++#define HAVE_INLINE_ASM 1 ++#define HAVE_SYMVER 0 ++#define HAVE_X86ASM 0 ++#define HAVE_BIGENDIAN 0 ++#define HAVE_FAST_UNALIGNED 1 ++#define HAVE_ARPA_INET_H 0 ++#define HAVE_ASM_HWCAP_H 0 ++#define HAVE_ASM_TYPES_H 1 ++#define HAVE_CDIO_PARANOIA_H 0 ++#define HAVE_CDIO_PARANOIA_PARANOIA_H 0 ++#define HAVE_CUDA_H 0 ++#define HAVE_DISPATCH_DISPATCH_H 0 ++#define HAVE_DEV_BKTR_IOCTL_BT848_H 0 ++#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0 ++#define HAVE_DEV_IC_BT8XX_H 0 ++#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0 ++#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0 ++#define HAVE_DIRECT_H 0 ++#define HAVE_DIRENT_H 1 
++#define HAVE_DXGIDEBUG_H 0 ++#define HAVE_DXVA_H 0 ++#define HAVE_ES2_GL_H 0 ++#define HAVE_GSM_H 0 ++#define HAVE_IO_H 0 ++#define HAVE_LINUX_DMA_BUF_H 0 ++#define HAVE_LINUX_PERF_EVENT_H 1 ++#define HAVE_MACHINE_IOCTL_BT848_H 0 ++#define HAVE_MACHINE_IOCTL_METEOR_H 0 ++#define HAVE_MALLOC_H 1 ++#define HAVE_OPENCV2_CORE_CORE_C_H 0 ++#define HAVE_OPENGL_GL3_H 0 ++#define HAVE_POLL_H 1 ++#define HAVE_SYS_PARAM_H 1 ++#define HAVE_SYS_RESOURCE_H 1 ++#define HAVE_SYS_SELECT_H 1 ++#define HAVE_SYS_SOUNDCARD_H 1 ++#define HAVE_SYS_TIME_H 1 ++#define HAVE_SYS_UN_H 1 ++#define HAVE_SYS_VIDEOIO_H 0 ++#define HAVE_TERMIOS_H 1 ++#define HAVE_UDPLITE_H 0 ++#define HAVE_UNISTD_H 1 ++#define HAVE_VALGRIND_VALGRIND_H 0 /* #define HAVE_VALGRIND_VALGRIND_H 1 -- forced to 0. See https://crbug.com/590440 */ ++#define HAVE_WINDOWS_H 0 ++#define HAVE_WINSOCK2_H 0 ++#define HAVE_INTRINSICS_NEON 0 ++#define HAVE_ATANF 1 ++#define HAVE_ATAN2F 1 ++#define HAVE_CBRT 1 ++#define HAVE_CBRTF 1 ++#define HAVE_COPYSIGN 1 ++#define HAVE_COSF 1 ++#define HAVE_ERF 1 ++#define HAVE_EXP2 1 ++#define HAVE_EXP2F 1 ++#define HAVE_EXPF 1 ++#define HAVE_HYPOT 1 ++#define HAVE_ISFINITE 1 ++#define HAVE_ISINF 1 ++#define HAVE_ISNAN 1 ++#define HAVE_LDEXPF 1 ++#define HAVE_LLRINT 1 ++#define HAVE_LLRINTF 1 ++#define HAVE_LOG2 1 ++#define HAVE_LOG2F 1 ++#define HAVE_LOG10F 1 ++#define HAVE_LRINT 1 ++#define HAVE_LRINTF 1 ++#define HAVE_POWF 1 ++#define HAVE_RINT 1 ++#define HAVE_ROUND 1 ++#define HAVE_ROUNDF 1 ++#define HAVE_SINF 1 ++#define HAVE_TRUNC 1 ++#define HAVE_TRUNCF 1 ++#define HAVE_DOS_PATHS 0 ++#define HAVE_LIBC_MSVCRT 0 ++#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0 ++#define HAVE_SECTION_DATA_REL_RO 1 ++#define HAVE_THREADS 1 ++#define HAVE_UWP 0 ++#define HAVE_WINRT 0 ++#define HAVE_ACCESS 1 ++#define HAVE_ALIGNED_MALLOC 0 ++#define HAVE_ARC4RANDOM_BUF 0 ++#define HAVE_CLOCK_GETTIME 1 ++#define HAVE_CLOSESOCKET 0 ++#define HAVE_COMMANDLINETOARGVW 0 ++#define HAVE_FCNTL 1 ++#define 
HAVE_GETADDRINFO 0 ++#define HAVE_GETAUXVAL 1 ++#define HAVE_GETENV 1 ++#define HAVE_GETHRTIME 0 ++#define HAVE_GETOPT 1 ++#define HAVE_GETMODULEHANDLE 0 ++#define HAVE_GETPROCESSAFFINITYMASK 0 ++#define HAVE_GETPROCESSMEMORYINFO 0 ++#define HAVE_GETPROCESSTIMES 0 ++#define HAVE_GETRUSAGE 1 ++#define HAVE_GETSTDHANDLE 0 ++#define HAVE_GETSYSTEMTIMEASFILETIME 0 ++#define HAVE_GETTIMEOFDAY 1 ++#define HAVE_GLOB 1 ++#define HAVE_GLXGETPROCADDRESS 0 ++#define HAVE_GMTIME_R 1 ++#define HAVE_INET_ATON 0 ++#define HAVE_ISATTY 1 ++#define HAVE_KBHIT 0 ++#define HAVE_LOCALTIME_R 1 ++#define HAVE_LSTAT 1 ++#define HAVE_LZO1X_999_COMPRESS 0 ++#define HAVE_MACH_ABSOLUTE_TIME 0 ++#define HAVE_MAPVIEWOFFILE 0 ++#define HAVE_MEMALIGN 1 ++#define HAVE_MKSTEMP 1 ++#define HAVE_MMAP 1 ++#define HAVE_MPROTECT 1 ++#define HAVE_NANOSLEEP 1 ++#define HAVE_PEEKNAMEDPIPE 0 ++#define HAVE_POSIX_MEMALIGN 1 ++#define HAVE_PRCTL 0 /* #define HAVE_PRCTL 1 -- forced to 0 for Fuchsia */ ++#define HAVE_PTHREAD_CANCEL 1 ++#define HAVE_SCHED_GETAFFINITY 1 ++#define HAVE_SECITEMIMPORT 0 ++#define HAVE_SETCONSOLETEXTATTRIBUTE 0 ++#define HAVE_SETCONSOLECTRLHANDLER 0 ++#define HAVE_SETDLLDIRECTORY 0 ++#define HAVE_SETMODE 0 ++#define HAVE_SETRLIMIT 1 ++#define HAVE_SLEEP 0 ++#define HAVE_STRERROR_R 1 ++#define HAVE_SYSCONF 1 ++#define HAVE_SYSCTL 0 /* #define HAVE_SYSCTL 0 -- forced to 0 for Fuchsia */ ++#define HAVE_SYSCTLBYNAME 0 ++#define HAVE_USLEEP 1 ++#define HAVE_UTGETOSTYPEFROMSTRING 0 ++#define HAVE_VIRTUALALLOC 0 ++#define HAVE_WGLGETPROCADDRESS 0 ++#define HAVE_BCRYPT 0 ++#define HAVE_VAAPI_DRM 0 ++#define HAVE_VAAPI_X11 0 ++#define HAVE_VAAPI_WIN32 0 ++#define HAVE_VDPAU_X11 0 ++#define HAVE_PTHREADS 1 ++#define HAVE_OS2THREADS 0 ++#define HAVE_W32THREADS 0 ++#define HAVE_AS_ARCH_DIRECTIVE 0 ++#define HAVE_AS_ARCHEXT_DOTPROD_DIRECTIVE 0 ++#define HAVE_AS_ARCHEXT_I8MM_DIRECTIVE 0 ++#define HAVE_AS_DN_DIRECTIVE 0 ++#define HAVE_AS_FPU_DIRECTIVE 0 ++#define HAVE_AS_FUNC 0 ++#define 
HAVE_AS_OBJECT_ARCH 0 ++#define HAVE_ASM_MOD_Q 0 ++#define HAVE_BLOCKS_EXTENSION 0 ++#define HAVE_EBP_AVAILABLE 0 ++#define HAVE_EBX_AVAILABLE 0 ++#define HAVE_GNU_AS 0 ++#define HAVE_GNU_WINDRES 0 ++#define HAVE_IBM_ASM 1 ++#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0 ++#define HAVE_INLINE_ASM_LABELS 1 ++#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1 ++#define HAVE_PRAGMA_DEPRECATED 1 ++#define HAVE_RSYNC_CONTIMEOUT 1 ++#define HAVE_SYMVER_ASM_LABEL 1 ++#define HAVE_SYMVER_GNU_ASM 1 ++#define HAVE_VFP_ARGS 0 ++#define HAVE_XFORM_ASM 1 ++#define HAVE_XMM_CLOBBERS 0 ++#define HAVE_DPI_AWARENESS_CONTEXT 0 ++#define HAVE_IDXGIOUTPUT5 0 ++#define HAVE_KCMVIDEOCODECTYPE_HEVC 0 ++#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0 ++#define HAVE_KCMVIDEOCODECTYPE_VP9 0 ++#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0 ++#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0 ++#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0 ++#define HAVE_SOCKLEN_T 0 ++#define HAVE_STRUCT_ADDRINFO 0 ++#define HAVE_STRUCT_GROUP_SOURCE_REQ 0 ++#define HAVE_STRUCT_IP_MREQ_SOURCE 0 ++#define HAVE_STRUCT_IPV6_MREQ 0 ++#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 0 ++#define HAVE_STRUCT_POLLFD 0 ++#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1 ++#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0 ++#define 
HAVE_STRUCT_SOCKADDR_IN6 0 ++#define HAVE_STRUCT_SOCKADDR_SA_LEN 0 ++#define HAVE_STRUCT_SOCKADDR_STORAGE 0 ++#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1 ++#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0 ++#define HAVE_GZIP 1 ++#define HAVE_LIBDRM_GETFB2 0 ++#define HAVE_MAKEINFO 0 ++#define HAVE_MAKEINFO_HTML 0 ++#define HAVE_OPENCL_D3D11 0 ++#define HAVE_OPENCL_DRM_ARM 0 ++#define HAVE_OPENCL_DRM_BEIGNET 0 ++#define HAVE_OPENCL_DXVA2 0 ++#define HAVE_OPENCL_VAAPI_BEIGNET 0 ++#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0 ++#define HAVE_PERL 1 ++#define HAVE_POD2MAN 1 ++#define HAVE_TEXI2HTML 0 ++#define HAVE_XMLLINT 1 ++#define HAVE_ZLIB_GZIP 0 ++#define HAVE_OPENVINO2 0 ++#define CONFIG_DOC 0 ++#define CONFIG_HTMLPAGES 0 ++#define CONFIG_MANPAGES 0 ++#define CONFIG_PODPAGES 0 ++#define CONFIG_TXTPAGES 0 ++#define CONFIG_AVIO_HTTP_SERVE_FILES_EXAMPLE 1 ++#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1 ++#define CONFIG_AVIO_READ_CALLBACK_EXAMPLE 1 ++#define CONFIG_DECODE_AUDIO_EXAMPLE 1 ++#define CONFIG_DECODE_FILTER_AUDIO_EXAMPLE 0 ++#define CONFIG_DECODE_FILTER_VIDEO_EXAMPLE 0 ++#define CONFIG_DECODE_VIDEO_EXAMPLE 1 ++#define CONFIG_DEMUX_DECODE_EXAMPLE 1 ++#define CONFIG_ENCODE_AUDIO_EXAMPLE 1 ++#define CONFIG_ENCODE_VIDEO_EXAMPLE 1 ++#define CONFIG_EXTRACT_MVS_EXAMPLE 1 ++#define CONFIG_FILTER_AUDIO_EXAMPLE 0 ++#define CONFIG_HW_DECODE_EXAMPLE 1 ++#define CONFIG_MUX_EXAMPLE 0 ++#define CONFIG_QSV_DECODE_EXAMPLE 0 ++#define CONFIG_REMUX_EXAMPLE 1 ++#define CONFIG_RESAMPLE_AUDIO_EXAMPLE 0 ++#define CONFIG_SCALE_VIDEO_EXAMPLE 0 ++#define CONFIG_SHOW_METADATA_EXAMPLE 1 ++#define CONFIG_TRANSCODE_AAC_EXAMPLE 0 ++#define CONFIG_TRANSCODE_EXAMPLE 0 ++#define CONFIG_VAAPI_ENCODE_EXAMPLE 0 ++#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0 ++#define CONFIG_QSV_TRANSCODE_EXAMPLE 0 ++#define CONFIG_AVISYNTH 0 ++#define CONFIG_FREI0R 0 ++#define CONFIG_LIBCDIO 0 ++#define CONFIG_LIBDAVS2 0 ++#define CONFIG_LIBRUBBERBAND 0 ++#define CONFIG_LIBVIDSTAB 0 ++#define CONFIG_LIBX264 0 ++#define 
CONFIG_LIBX265 0 ++#define CONFIG_LIBXAVS 0 ++#define CONFIG_LIBXAVS2 0 ++#define CONFIG_LIBXVID 0 ++#define CONFIG_DECKLINK 0 ++#define CONFIG_LIBFDK_AAC 0 ++#define CONFIG_LIBTLS 0 ++#define CONFIG_GMP 0 ++#define CONFIG_LIBARIBB24 0 ++#define CONFIG_LIBLENSFUN 0 ++#define CONFIG_LIBOPENCORE_AMRNB 0 ++#define CONFIG_LIBOPENCORE_AMRWB 0 ++#define CONFIG_LIBVO_AMRWBENC 0 ++#define CONFIG_MBEDTLS 0 ++#define CONFIG_RKMPP 0 ++#define CONFIG_LIBSMBCLIENT 0 ++#define CONFIG_CHROMAPRINT 0 ++#define CONFIG_GCRYPT 0 ++#define CONFIG_GNUTLS 0 ++#define CONFIG_JNI 0 ++#define CONFIG_LADSPA 0 ++#define CONFIG_LCMS2 0 ++#define CONFIG_LIBAOM 0 ++#define CONFIG_LIBARIBCAPTION 0 ++#define CONFIG_LIBASS 0 ++#define CONFIG_LIBBLURAY 0 ++#define CONFIG_LIBBS2B 0 ++#define CONFIG_LIBCACA 0 ++#define CONFIG_LIBCELT 0 ++#define CONFIG_LIBCODEC2 0 ++#define CONFIG_LIBDAV1D 0 ++#define CONFIG_LIBDC1394 0 ++#define CONFIG_LIBDRM 0 ++#define CONFIG_LIBFLITE 0 ++#define CONFIG_LIBFONTCONFIG 0 ++#define CONFIG_LIBFREETYPE 0 ++#define CONFIG_LIBFRIBIDI 0 ++#define CONFIG_LIBHARFBUZZ 0 ++#define CONFIG_LIBGLSLANG 0 ++#define CONFIG_LIBGME 0 ++#define CONFIG_LIBGSM 0 ++#define CONFIG_LIBIEC61883 0 ++#define CONFIG_LIBILBC 0 ++#define CONFIG_LIBJACK 0 ++#define CONFIG_LIBJXL 0 ++#define CONFIG_LIBKLVANC 0 ++#define CONFIG_LIBKVAZAAR 0 ++#define CONFIG_LIBMODPLUG 0 ++#define CONFIG_LIBMP3LAME 0 ++#define CONFIG_LIBMYSOFA 0 ++#define CONFIG_LIBOPENCV 0 ++#define CONFIG_LIBOPENH264 0 ++#define CONFIG_LIBOPENJPEG 0 ++#define CONFIG_LIBOPENMPT 0 ++#define CONFIG_LIBOPENVINO 0 ++#define CONFIG_LIBOPUS 1 ++#define CONFIG_LIBPLACEBO 0 ++#define CONFIG_LIBPULSE 0 ++#define CONFIG_LIBRABBITMQ 0 ++#define CONFIG_LIBRAV1E 0 ++#define CONFIG_LIBRIST 0 ++#define CONFIG_LIBRSVG 0 ++#define CONFIG_LIBRTMP 0 ++#define CONFIG_LIBSHADERC 0 ++#define CONFIG_LIBSHINE 0 ++#define CONFIG_LIBSMBCLIENT 0 ++#define CONFIG_LIBSNAPPY 0 ++#define CONFIG_LIBSOXR 0 ++#define CONFIG_LIBSPEEX 0 ++#define CONFIG_LIBSRT 0 
++#define CONFIG_LIBSSH 0 ++#define CONFIG_LIBSVTAV1 0 ++#define CONFIG_LIBTENSORFLOW 0 ++#define CONFIG_LIBTESSERACT 0 ++#define CONFIG_LIBTHEORA 0 ++#define CONFIG_LIBTWOLAME 0 ++#define CONFIG_LIBUAVS3D 0 ++#define CONFIG_LIBV4L2 0 ++#define CONFIG_LIBVMAF 0 ++#define CONFIG_LIBVORBIS 0 ++#define CONFIG_LIBVPX 0 ++#define CONFIG_LIBWEBP 0 ++#define CONFIG_LIBXML2 0 ++#define CONFIG_LIBZIMG 0 ++#define CONFIG_LIBZMQ 0 ++#define CONFIG_LIBZVBI 0 ++#define CONFIG_LV2 0 ++#define CONFIG_MEDIACODEC 0 ++#define CONFIG_OPENAL 0 ++#define CONFIG_OPENGL 0 ++#define CONFIG_OPENSSL 0 ++#define CONFIG_POCKETSPHINX 0 ++#define CONFIG_VAPOURSYNTH 0 ++#define CONFIG_ALSA 0 ++#define CONFIG_APPKIT 0 ++#define CONFIG_AVFOUNDATION 0 ++#define CONFIG_BZLIB 0 ++#define CONFIG_COREIMAGE 0 ++#define CONFIG_ICONV 0 ++#define CONFIG_LIBXCB 0 ++#define CONFIG_LIBXCB_SHM 0 ++#define CONFIG_LIBXCB_SHAPE 0 ++#define CONFIG_LIBXCB_XFIXES 0 ++#define CONFIG_LZMA 0 ++#define CONFIG_MEDIAFOUNDATION 0 ++#define CONFIG_METAL 0 ++#define CONFIG_SCHANNEL 0 ++#define CONFIG_SDL2 0 ++#define CONFIG_SECURETRANSPORT 0 ++#define CONFIG_SNDIO 0 ++#define CONFIG_XLIB 0 ++#define CONFIG_ZLIB 0 ++#define CONFIG_CUDA_NVCC 0 ++#define CONFIG_CUDA_SDK 0 ++#define CONFIG_LIBNPP 0 ++#define CONFIG_LIBMFX 0 ++#define CONFIG_LIBVPL 0 ++#define CONFIG_MMAL 0 ++#define CONFIG_OMX 0 ++#define CONFIG_OPENCL 0 ++#define CONFIG_AMF 0 ++#define CONFIG_AUDIOTOOLBOX 0 ++#define CONFIG_CRYSTALHD 0 ++#define CONFIG_CUDA 0 ++#define CONFIG_CUDA_LLVM 0 ++#define CONFIG_CUVID 0 ++#define CONFIG_D3D11VA 0 ++#define CONFIG_DXVA2 0 ++#define CONFIG_FFNVCODEC 0 ++#define CONFIG_NVDEC 0 ++#define CONFIG_NVENC 0 ++#define CONFIG_VAAPI 0 ++#define CONFIG_VDPAU 0 ++#define CONFIG_VIDEOTOOLBOX 0 ++#define CONFIG_VULKAN 0 ++#define CONFIG_V4L2_M2M 0 ++#define CONFIG_FTRAPV 0 ++#define CONFIG_GRAY 0 ++#define CONFIG_HARDCODED_TABLES 0 ++#define CONFIG_OMX_RPI 0 ++#define CONFIG_RUNTIME_CPUDETECT 1 ++#define CONFIG_SAFE_BITSTREAM_READER 1 
++#define CONFIG_SHARED 0 ++#define CONFIG_SMALL 0 ++#define CONFIG_STATIC 1 ++#define CONFIG_SWSCALE_ALPHA 1 ++#define CONFIG_GPL 0 ++#define CONFIG_NONFREE 0 ++#define CONFIG_VERSION3 0 ++#define CONFIG_AVDEVICE 0 ++#define CONFIG_AVFILTER 0 ++#define CONFIG_SWSCALE 0 ++#define CONFIG_POSTPROC 0 ++#define CONFIG_AVFORMAT 1 ++#define CONFIG_AVCODEC 1 ++#define CONFIG_SWRESAMPLE 0 ++#define CONFIG_AVUTIL 1 ++#define CONFIG_FFPLAY 0 ++#define CONFIG_FFPROBE 0 ++#define CONFIG_FFMPEG 0 ++#define CONFIG_DWT 0 ++#define CONFIG_ERROR_RESILIENCE 1 ++#define CONFIG_FAAN 0 ++#define CONFIG_FAST_UNALIGNED 1 ++#define CONFIG_LSP 0 ++#define CONFIG_PIXELUTILS 0 ++#define CONFIG_NETWORK 0 ++#define CONFIG_AUTODETECT 0 ++#define CONFIG_FONTCONFIG 0 ++#define CONFIG_LARGE_TESTS 1 ++#define CONFIG_LINUX_PERF 0 ++#define CONFIG_MACOS_KPERF 0 ++#define CONFIG_MEMORY_POISONING 0 ++#define CONFIG_NEON_CLOBBER_TEST 0 ++#define CONFIG_OSSFUZZ 0 ++#define CONFIG_PIC 1 ++#define CONFIG_PTX_COMPRESSION 0 ++#define CONFIG_THUMB 0 ++#define CONFIG_VALGRIND_BACKTRACE 0 ++#define CONFIG_XMM_CLOBBER_TEST 0 ++#define CONFIG_BSFS 0 ++#define CONFIG_DECODERS 1 ++#define CONFIG_ENCODERS 0 ++#define CONFIG_HWACCELS 0 ++#define CONFIG_PARSERS 1 ++#define CONFIG_INDEVS 0 ++#define CONFIG_OUTDEVS 0 ++#define CONFIG_FILTERS 0 ++#define CONFIG_DEMUXERS 1 ++#define CONFIG_MUXERS 0 ++#define CONFIG_PROTOCOLS 0 ++#define CONFIG_AANDCTTABLES 0 ++#define CONFIG_AC3DSP 0 ++#define CONFIG_ADTS_HEADER 1 ++#define CONFIG_ATSC_A53 1 ++#define CONFIG_AUDIO_FRAME_QUEUE 0 ++#define CONFIG_AUDIODSP 0 ++#define CONFIG_BLOCKDSP 1 ++#define CONFIG_BSWAPDSP 0 ++#define CONFIG_CABAC 1 ++#define CONFIG_CBS 0 ++#define CONFIG_CBS_AV1 0 ++#define CONFIG_CBS_H264 0 ++#define CONFIG_CBS_H265 0 ++#define CONFIG_CBS_H266 0 ++#define CONFIG_CBS_JPEG 0 ++#define CONFIG_CBS_MPEG2 0 ++#define CONFIG_CBS_VP9 0 ++#define CONFIG_DEFLATE_WRAPPER 0 ++#define CONFIG_DIRAC_PARSE 1 ++#define CONFIG_DNN 0 ++#define CONFIG_DOVI_RPU 0 
++#define CONFIG_DVPROFILE 0 ++#define CONFIG_EVCPARSE 0 ++#define CONFIG_EXIF 1 ++#define CONFIG_FAANDCT 0 ++#define CONFIG_FAANIDCT 0 ++#define CONFIG_FDCTDSP 0 ++#define CONFIG_FMTCONVERT 0 ++#define CONFIG_FRAME_THREAD_ENCODER 0 ++#define CONFIG_G722DSP 0 ++#define CONFIG_GOLOMB 1 ++#define CONFIG_GPLV3 0 ++#define CONFIG_H263DSP 1 ++#define CONFIG_H264CHROMA 1 ++#define CONFIG_H264DSP 1 ++#define CONFIG_H264PARSE 1 ++#define CONFIG_H264PRED 1 ++#define CONFIG_H264QPEL 1 ++#define CONFIG_H264_SEI 1 ++#define CONFIG_HEVCPARSE 0 ++#define CONFIG_HEVC_SEI 0 ++#define CONFIG_HPELDSP 1 ++#define CONFIG_HUFFMAN 0 ++#define CONFIG_HUFFYUVDSP 0 ++#define CONFIG_HUFFYUVENCDSP 0 ++#define CONFIG_IDCTDSP 1 ++#define CONFIG_IIRFILTER 0 ++#define CONFIG_INFLATE_WRAPPER 0 ++#define CONFIG_INTRAX8 0 ++#define CONFIG_ISO_MEDIA 1 ++#define CONFIG_IVIDSP 0 ++#define CONFIG_JPEGTABLES 0 ++#define CONFIG_LGPLV3 0 ++#define CONFIG_LIBX262 0 ++#define CONFIG_LLAUDDSP 0 ++#define CONFIG_LLVIDDSP 0 ++#define CONFIG_LLVIDENCDSP 0 ++#define CONFIG_LPC 0 ++#define CONFIG_LZF 0 ++#define CONFIG_ME_CMP 1 ++#define CONFIG_MPEG_ER 1 ++#define CONFIG_MPEGAUDIO 1 ++#define CONFIG_MPEGAUDIODSP 1 ++#define CONFIG_MPEGAUDIOHEADER 1 ++#define CONFIG_MPEG4AUDIO 1 ++#define CONFIG_MPEGVIDEO 1 ++#define CONFIG_MPEGVIDEODEC 1 ++#define CONFIG_MPEGVIDEOENC 0 ++#define CONFIG_MSMPEG4DEC 0 ++#define CONFIG_MSMPEG4ENC 0 ++#define CONFIG_MSS34DSP 0 ++#define CONFIG_PIXBLOCKDSP 0 ++#define CONFIG_QPELDSP 1 ++#define CONFIG_QSV 0 ++#define CONFIG_QSVDEC 0 ++#define CONFIG_QSVENC 0 ++#define CONFIG_QSVVPP 0 ++#define CONFIG_RANGECODER 0 ++#define CONFIG_RIFFDEC 1 ++#define CONFIG_RIFFENC 0 ++#define CONFIG_RTPDEC 0 ++#define CONFIG_RTPENC_CHAIN 0 ++#define CONFIG_RV34DSP 0 ++#define CONFIG_SCENE_SAD 0 ++#define CONFIG_SINEWIN 1 ++#define CONFIG_SNAPPY 0 ++#define CONFIG_SRTP 0 ++#define CONFIG_STARTCODE 1 ++#define CONFIG_TEXTUREDSP 0 ++#define CONFIG_TEXTUREDSPENC 0 ++#define CONFIG_TPELDSP 0 ++#define 
CONFIG_VAAPI_1 0 ++#define CONFIG_VAAPI_ENCODE 0 ++#define CONFIG_VC1DSP 0 ++#define CONFIG_VIDEODSP 1 ++#define CONFIG_VP3DSP 1 ++#define CONFIG_VP56DSP 0 ++#define CONFIG_VP8DSP 1 ++#define CONFIG_WMA_FREQS 0 ++#define CONFIG_WMV2DSP 0 ++#endif /* FFMPEG_CONFIG_H */ +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/config_components.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/config_components.h +@@ -0,0 +1,2196 @@ ++/* Automatically generated by configure - do not modify! */ ++#ifndef FFMPEG_CONFIG_COMPONENTS_H ++#define FFMPEG_CONFIG_COMPONENTS_H ++#define CONFIG_AAC_ADTSTOASC_BSF 0 ++#define CONFIG_AV1_FRAME_MERGE_BSF 0 ++#define CONFIG_AV1_FRAME_SPLIT_BSF 0 ++#define CONFIG_AV1_METADATA_BSF 0 ++#define CONFIG_CHOMP_BSF 0 ++#define CONFIG_DUMP_EXTRADATA_BSF 0 ++#define CONFIG_DCA_CORE_BSF 0 ++#define CONFIG_DTS2PTS_BSF 0 ++#define CONFIG_DV_ERROR_MARKER_BSF 0 ++#define CONFIG_EAC3_CORE_BSF 0 ++#define CONFIG_EXTRACT_EXTRADATA_BSF 0 ++#define CONFIG_FILTER_UNITS_BSF 0 ++#define CONFIG_H264_METADATA_BSF 0 ++#define CONFIG_H264_MP4TOANNEXB_BSF 0 ++#define CONFIG_H264_REDUNDANT_PPS_BSF 0 ++#define CONFIG_HAPQA_EXTRACT_BSF 0 ++#define CONFIG_HEVC_METADATA_BSF 0 ++#define CONFIG_HEVC_MP4TOANNEXB_BSF 0 ++#define CONFIG_IMX_DUMP_HEADER_BSF 0 ++#define CONFIG_MEDIA100_TO_MJPEGB_BSF 0 ++#define CONFIG_MJPEG2JPEG_BSF 0 ++#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0 ++#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0 ++#define CONFIG_MPEG2_METADATA_BSF 0 ++#define CONFIG_MPEG4_UNPACK_BFRAMES_BSF 0 ++#define CONFIG_MOV2TEXTSUB_BSF 0 ++#define CONFIG_NOISE_BSF 0 ++#define CONFIG_NULL_BSF 0 ++#define CONFIG_OPUS_METADATA_BSF 0 ++#define CONFIG_PCM_RECHUNK_BSF 0 ++#define CONFIG_PGS_FRAME_MERGE_BSF 0 ++#define CONFIG_PRORES_METADATA_BSF 0 ++#define CONFIG_REMOVE_EXTRADATA_BSF 0 ++#define CONFIG_SETTS_BSF 0 ++#define 
CONFIG_TEXT2MOVSUB_BSF 0 ++#define CONFIG_TRACE_HEADERS_BSF 0 ++#define CONFIG_TRUEHD_CORE_BSF 0 ++#define CONFIG_VP9_METADATA_BSF 0 ++#define CONFIG_VP9_RAW_REORDER_BSF 0 ++#define CONFIG_VP9_SUPERFRAME_BSF 0 ++#define CONFIG_VP9_SUPERFRAME_SPLIT_BSF 0 ++#define CONFIG_VVC_METADATA_BSF 0 ++#define CONFIG_VVC_MP4TOANNEXB_BSF 0 ++#define CONFIG_EVC_FRAME_MERGE_BSF 0 ++#define CONFIG_AASC_DECODER 0 ++#define CONFIG_AIC_DECODER 0 ++#define CONFIG_ALIAS_PIX_DECODER 0 ++#define CONFIG_AGM_DECODER 0 ++#define CONFIG_AMV_DECODER 0 ++#define CONFIG_ANM_DECODER 0 ++#define CONFIG_ANSI_DECODER 0 ++#define CONFIG_APNG_DECODER 0 ++#define CONFIG_ARBC_DECODER 0 ++#define CONFIG_ARGO_DECODER 0 ++#define CONFIG_ASV1_DECODER 0 ++#define CONFIG_ASV2_DECODER 0 ++#define CONFIG_AURA_DECODER 0 ++#define CONFIG_AURA2_DECODER 0 ++#define CONFIG_AVRP_DECODER 0 ++#define CONFIG_AVRN_DECODER 0 ++#define CONFIG_AVS_DECODER 0 ++#define CONFIG_AVUI_DECODER 0 ++#define CONFIG_AYUV_DECODER 0 ++#define CONFIG_BETHSOFTVID_DECODER 0 ++#define CONFIG_BFI_DECODER 0 ++#define CONFIG_BINK_DECODER 0 ++#define CONFIG_BITPACKED_DECODER 0 ++#define CONFIG_BMP_DECODER 0 ++#define CONFIG_BMV_VIDEO_DECODER 0 ++#define CONFIG_BRENDER_PIX_DECODER 0 ++#define CONFIG_C93_DECODER 0 ++#define CONFIG_CAVS_DECODER 0 ++#define CONFIG_CDGRAPHICS_DECODER 0 ++#define CONFIG_CDTOONS_DECODER 0 ++#define CONFIG_CDXL_DECODER 0 ++#define CONFIG_CFHD_DECODER 0 ++#define CONFIG_CINEPAK_DECODER 0 ++#define CONFIG_CLEARVIDEO_DECODER 0 ++#define CONFIG_CLJR_DECODER 0 ++#define CONFIG_CLLC_DECODER 0 ++#define CONFIG_COMFORTNOISE_DECODER 0 ++#define CONFIG_CPIA_DECODER 0 ++#define CONFIG_CRI_DECODER 0 ++#define CONFIG_CSCD_DECODER 0 ++#define CONFIG_CYUV_DECODER 0 ++#define CONFIG_DDS_DECODER 0 ++#define CONFIG_DFA_DECODER 0 ++#define CONFIG_DIRAC_DECODER 0 ++#define CONFIG_DNXHD_DECODER 0 ++#define CONFIG_DPX_DECODER 0 ++#define CONFIG_DSICINVIDEO_DECODER 0 ++#define CONFIG_DVAUDIO_DECODER 0 ++#define CONFIG_DVVIDEO_DECODER 0 
++#define CONFIG_DXA_DECODER 0 ++#define CONFIG_DXTORY_DECODER 0 ++#define CONFIG_DXV_DECODER 0 ++#define CONFIG_EACMV_DECODER 0 ++#define CONFIG_EAMAD_DECODER 0 ++#define CONFIG_EATGQ_DECODER 0 ++#define CONFIG_EATGV_DECODER 0 ++#define CONFIG_EATQI_DECODER 0 ++#define CONFIG_EIGHTBPS_DECODER 0 ++#define CONFIG_EIGHTSVX_EXP_DECODER 0 ++#define CONFIG_EIGHTSVX_FIB_DECODER 0 ++#define CONFIG_ESCAPE124_DECODER 0 ++#define CONFIG_ESCAPE130_DECODER 0 ++#define CONFIG_EXR_DECODER 0 ++#define CONFIG_FFV1_DECODER 0 ++#define CONFIG_FFVHUFF_DECODER 0 ++#define CONFIG_FIC_DECODER 0 ++#define CONFIG_FITS_DECODER 0 ++#define CONFIG_FLASHSV_DECODER 0 ++#define CONFIG_FLASHSV2_DECODER 0 ++#define CONFIG_FLIC_DECODER 0 ++#define CONFIG_FLV_DECODER 0 ++#define CONFIG_FMVC_DECODER 0 ++#define CONFIG_FOURXM_DECODER 0 ++#define CONFIG_FRAPS_DECODER 0 ++#define CONFIG_FRWU_DECODER 0 ++#define CONFIG_G2M_DECODER 0 ++#define CONFIG_GDV_DECODER 0 ++#define CONFIG_GEM_DECODER 0 ++#define CONFIG_GIF_DECODER 0 ++#define CONFIG_H261_DECODER 0 ++#define CONFIG_H263_DECODER 1 ++#define CONFIG_H263I_DECODER 0 ++#define CONFIG_H263P_DECODER 0 ++#define CONFIG_H263_V4L2M2M_DECODER 0 ++#define CONFIG_H264_DECODER 1 ++#define CONFIG_H264_CRYSTALHD_DECODER 0 ++#define CONFIG_H264_V4L2M2M_DECODER 0 ++#define CONFIG_H264_MEDIACODEC_DECODER 0 ++#define CONFIG_H264_MMAL_DECODER 0 ++#define CONFIG_H264_QSV_DECODER 0 ++#define CONFIG_H264_RKMPP_DECODER 0 ++#define CONFIG_HAP_DECODER 0 ++#define CONFIG_HEVC_DECODER 0 ++#define CONFIG_HEVC_QSV_DECODER 0 ++#define CONFIG_HEVC_RKMPP_DECODER 0 ++#define CONFIG_HEVC_V4L2M2M_DECODER 0 ++#define CONFIG_HNM4_VIDEO_DECODER 0 ++#define CONFIG_HQ_HQA_DECODER 0 ++#define CONFIG_HQX_DECODER 0 ++#define CONFIG_HUFFYUV_DECODER 0 ++#define CONFIG_HYMT_DECODER 0 ++#define CONFIG_IDCIN_DECODER 0 ++#define CONFIG_IFF_ILBM_DECODER 0 ++#define CONFIG_IMM4_DECODER 0 ++#define CONFIG_IMM5_DECODER 0 ++#define CONFIG_INDEO2_DECODER 0 ++#define CONFIG_INDEO3_DECODER 0 ++#define 
CONFIG_INDEO4_DECODER 0 ++#define CONFIG_INDEO5_DECODER 0 ++#define CONFIG_INTERPLAY_VIDEO_DECODER 0 ++#define CONFIG_IPU_DECODER 0 ++#define CONFIG_JPEG2000_DECODER 0 ++#define CONFIG_JPEGLS_DECODER 0 ++#define CONFIG_JV_DECODER 0 ++#define CONFIG_KGV1_DECODER 0 ++#define CONFIG_KMVC_DECODER 0 ++#define CONFIG_LAGARITH_DECODER 0 ++#define CONFIG_LOCO_DECODER 0 ++#define CONFIG_LSCR_DECODER 0 ++#define CONFIG_M101_DECODER 0 ++#define CONFIG_MAGICYUV_DECODER 0 ++#define CONFIG_MDEC_DECODER 0 ++#define CONFIG_MEDIA100_DECODER 0 ++#define CONFIG_MIMIC_DECODER 0 ++#define CONFIG_MJPEG_DECODER 0 ++#define CONFIG_MJPEGB_DECODER 0 ++#define CONFIG_MMVIDEO_DECODER 0 ++#define CONFIG_MOBICLIP_DECODER 0 ++#define CONFIG_MOTIONPIXELS_DECODER 0 ++#define CONFIG_MPEG1VIDEO_DECODER 0 ++#define CONFIG_MPEG2VIDEO_DECODER 0 ++#define CONFIG_MPEG4_DECODER 1 ++#define CONFIG_MPEG4_CRYSTALHD_DECODER 0 ++#define CONFIG_MPEG4_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG4_MMAL_DECODER 0 ++#define CONFIG_MPEGVIDEO_DECODER 0 ++#define CONFIG_MPEG1_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG2_MMAL_DECODER 0 ++#define CONFIG_MPEG2_CRYSTALHD_DECODER 0 ++#define CONFIG_MPEG2_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG2_QSV_DECODER 0 ++#define CONFIG_MPEG2_MEDIACODEC_DECODER 0 ++#define CONFIG_MSA1_DECODER 0 ++#define CONFIG_MSCC_DECODER 0 ++#define CONFIG_MSMPEG4V1_DECODER 0 ++#define CONFIG_MSMPEG4V2_DECODER 0 ++#define CONFIG_MSMPEG4V3_DECODER 0 ++#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0 ++#define CONFIG_MSP2_DECODER 0 ++#define CONFIG_MSRLE_DECODER 0 ++#define CONFIG_MSS1_DECODER 0 ++#define CONFIG_MSS2_DECODER 0 ++#define CONFIG_MSVIDEO1_DECODER 0 ++#define CONFIG_MSZH_DECODER 0 ++#define CONFIG_MTS2_DECODER 0 ++#define CONFIG_MV30_DECODER 0 ++#define CONFIG_MVC1_DECODER 0 ++#define CONFIG_MVC2_DECODER 0 ++#define CONFIG_MVDV_DECODER 0 ++#define CONFIG_MVHA_DECODER 0 ++#define CONFIG_MWSC_DECODER 0 ++#define CONFIG_MXPEG_DECODER 0 ++#define CONFIG_NOTCHLC_DECODER 0 ++#define CONFIG_NUV_DECODER 0 
++#define CONFIG_PAF_VIDEO_DECODER 0 ++#define CONFIG_PAM_DECODER 0 ++#define CONFIG_PBM_DECODER 0 ++#define CONFIG_PCX_DECODER 0 ++#define CONFIG_PDV_DECODER 0 ++#define CONFIG_PFM_DECODER 0 ++#define CONFIG_PGM_DECODER 0 ++#define CONFIG_PGMYUV_DECODER 0 ++#define CONFIG_PGX_DECODER 0 ++#define CONFIG_PHM_DECODER 0 ++#define CONFIG_PHOTOCD_DECODER 0 ++#define CONFIG_PICTOR_DECODER 0 ++#define CONFIG_PIXLET_DECODER 0 ++#define CONFIG_PNG_DECODER 0 ++#define CONFIG_PPM_DECODER 0 ++#define CONFIG_PRORES_DECODER 0 ++#define CONFIG_PROSUMER_DECODER 0 ++#define CONFIG_PSD_DECODER 0 ++#define CONFIG_PTX_DECODER 0 ++#define CONFIG_QDRAW_DECODER 0 ++#define CONFIG_QOI_DECODER 0 ++#define CONFIG_QPEG_DECODER 0 ++#define CONFIG_QTRLE_DECODER 0 ++#define CONFIG_R10K_DECODER 0 ++#define CONFIG_R210_DECODER 0 ++#define CONFIG_RASC_DECODER 0 ++#define CONFIG_RAWVIDEO_DECODER 0 ++#define CONFIG_RKA_DECODER 0 ++#define CONFIG_RL2_DECODER 0 ++#define CONFIG_ROQ_DECODER 0 ++#define CONFIG_RPZA_DECODER 0 ++#define CONFIG_RSCC_DECODER 0 ++#define CONFIG_RTV1_DECODER 0 ++#define CONFIG_RV10_DECODER 0 ++#define CONFIG_RV20_DECODER 0 ++#define CONFIG_RV30_DECODER 0 ++#define CONFIG_RV40_DECODER 0 ++#define CONFIG_S302M_DECODER 0 ++#define CONFIG_SANM_DECODER 0 ++#define CONFIG_SCPR_DECODER 0 ++#define CONFIG_SCREENPRESSO_DECODER 0 ++#define CONFIG_SGA_DECODER 0 ++#define CONFIG_SGI_DECODER 0 ++#define CONFIG_SGIRLE_DECODER 0 ++#define CONFIG_SHEERVIDEO_DECODER 0 ++#define CONFIG_SIMBIOSIS_IMX_DECODER 0 ++#define CONFIG_SMACKER_DECODER 0 ++#define CONFIG_SMC_DECODER 0 ++#define CONFIG_SMVJPEG_DECODER 0 ++#define CONFIG_SNOW_DECODER 0 ++#define CONFIG_SP5X_DECODER 0 ++#define CONFIG_SPEEDHQ_DECODER 0 ++#define CONFIG_SPEEX_DECODER 0 ++#define CONFIG_SRGC_DECODER 0 ++#define CONFIG_SUNRAST_DECODER 0 ++#define CONFIG_SVQ1_DECODER 0 ++#define CONFIG_SVQ3_DECODER 0 ++#define CONFIG_TARGA_DECODER 0 ++#define CONFIG_TARGA_Y216_DECODER 0 ++#define CONFIG_TDSC_DECODER 0 ++#define 
CONFIG_THEORA_DECODER 1 ++#define CONFIG_THP_DECODER 0 ++#define CONFIG_TIERTEXSEQVIDEO_DECODER 0 ++#define CONFIG_TIFF_DECODER 0 ++#define CONFIG_TMV_DECODER 0 ++#define CONFIG_TRUEMOTION1_DECODER 0 ++#define CONFIG_TRUEMOTION2_DECODER 0 ++#define CONFIG_TRUEMOTION2RT_DECODER 0 ++#define CONFIG_TSCC_DECODER 0 ++#define CONFIG_TSCC2_DECODER 0 ++#define CONFIG_TXD_DECODER 0 ++#define CONFIG_ULTI_DECODER 0 ++#define CONFIG_UTVIDEO_DECODER 0 ++#define CONFIG_V210_DECODER 0 ++#define CONFIG_V210X_DECODER 0 ++#define CONFIG_V308_DECODER 0 ++#define CONFIG_V408_DECODER 0 ++#define CONFIG_V410_DECODER 0 ++#define CONFIG_VB_DECODER 0 ++#define CONFIG_VBN_DECODER 0 ++#define CONFIG_VBLE_DECODER 0 ++#define CONFIG_VC1_DECODER 0 ++#define CONFIG_VC1_CRYSTALHD_DECODER 0 ++#define CONFIG_VC1IMAGE_DECODER 0 ++#define CONFIG_VC1_MMAL_DECODER 0 ++#define CONFIG_VC1_QSV_DECODER 0 ++#define CONFIG_VC1_V4L2M2M_DECODER 0 ++#define CONFIG_VCR1_DECODER 0 ++#define CONFIG_VMDVIDEO_DECODER 0 ++#define CONFIG_VMIX_DECODER 0 ++#define CONFIG_VMNC_DECODER 0 ++#define CONFIG_VP3_DECODER 1 ++#define CONFIG_VP4_DECODER 0 ++#define CONFIG_VP5_DECODER 0 ++#define CONFIG_VP6_DECODER 0 ++#define CONFIG_VP6A_DECODER 0 ++#define CONFIG_VP6F_DECODER 0 ++#define CONFIG_VP7_DECODER 0 ++#define CONFIG_VP8_DECODER 1 ++#define CONFIG_VP8_RKMPP_DECODER 0 ++#define CONFIG_VP8_V4L2M2M_DECODER 0 ++#define CONFIG_VP9_DECODER 0 ++#define CONFIG_VP9_RKMPP_DECODER 0 ++#define CONFIG_VP9_V4L2M2M_DECODER 0 ++#define CONFIG_VQA_DECODER 0 ++#define CONFIG_VQC_DECODER 0 ++#define CONFIG_WBMP_DECODER 0 ++#define CONFIG_WEBP_DECODER 0 ++#define CONFIG_WCMV_DECODER 0 ++#define CONFIG_WRAPPED_AVFRAME_DECODER 0 ++#define CONFIG_WMV1_DECODER 0 ++#define CONFIG_WMV2_DECODER 0 ++#define CONFIG_WMV3_DECODER 0 ++#define CONFIG_WMV3_CRYSTALHD_DECODER 0 ++#define CONFIG_WMV3IMAGE_DECODER 0 ++#define CONFIG_WNV1_DECODER 0 ++#define CONFIG_XAN_WC3_DECODER 0 ++#define CONFIG_XAN_WC4_DECODER 0 ++#define CONFIG_XBM_DECODER 0 ++#define 
CONFIG_XFACE_DECODER 0 ++#define CONFIG_XL_DECODER 0 ++#define CONFIG_XPM_DECODER 0 ++#define CONFIG_XWD_DECODER 0 ++#define CONFIG_Y41P_DECODER 0 ++#define CONFIG_YLC_DECODER 0 ++#define CONFIG_YOP_DECODER 0 ++#define CONFIG_YUV4_DECODER 0 ++#define CONFIG_ZERO12V_DECODER 0 ++#define CONFIG_ZEROCODEC_DECODER 0 ++#define CONFIG_ZLIB_DECODER 0 ++#define CONFIG_ZMBV_DECODER 0 ++#define CONFIG_AAC_DECODER 1 ++#define CONFIG_AAC_FIXED_DECODER 0 ++#define CONFIG_AAC_LATM_DECODER 0 ++#define CONFIG_AC3_DECODER 0 ++#define CONFIG_AC3_FIXED_DECODER 0 ++#define CONFIG_ACELP_KELVIN_DECODER 0 ++#define CONFIG_ALAC_DECODER 0 ++#define CONFIG_ALS_DECODER 0 ++#define CONFIG_AMRNB_DECODER 0 ++#define CONFIG_AMRWB_DECODER 0 ++#define CONFIG_APAC_DECODER 0 ++#define CONFIG_APE_DECODER 0 ++#define CONFIG_APTX_DECODER 0 ++#define CONFIG_APTX_HD_DECODER 0 ++#define CONFIG_ATRAC1_DECODER 0 ++#define CONFIG_ATRAC3_DECODER 0 ++#define CONFIG_ATRAC3AL_DECODER 0 ++#define CONFIG_ATRAC3P_DECODER 0 ++#define CONFIG_ATRAC3PAL_DECODER 0 ++#define CONFIG_ATRAC9_DECODER 0 ++#define CONFIG_BINKAUDIO_DCT_DECODER 0 ++#define CONFIG_BINKAUDIO_RDFT_DECODER 0 ++#define CONFIG_BMV_AUDIO_DECODER 0 ++#define CONFIG_BONK_DECODER 0 ++#define CONFIG_COOK_DECODER 0 ++#define CONFIG_DCA_DECODER 0 ++#define CONFIG_DFPWM_DECODER 0 ++#define CONFIG_DOLBY_E_DECODER 0 ++#define CONFIG_DSD_LSBF_DECODER 0 ++#define CONFIG_DSD_MSBF_DECODER 0 ++#define CONFIG_DSD_LSBF_PLANAR_DECODER 0 ++#define CONFIG_DSD_MSBF_PLANAR_DECODER 0 ++#define CONFIG_DSICINAUDIO_DECODER 0 ++#define CONFIG_DSS_SP_DECODER 0 ++#define CONFIG_DST_DECODER 0 ++#define CONFIG_EAC3_DECODER 0 ++#define CONFIG_EVRC_DECODER 0 ++#define CONFIG_FASTAUDIO_DECODER 0 ++#define CONFIG_FFWAVESYNTH_DECODER 0 ++#define CONFIG_FLAC_DECODER 1 ++#define CONFIG_FTR_DECODER 0 ++#define CONFIG_G723_1_DECODER 0 ++#define CONFIG_G729_DECODER 0 ++#define CONFIG_GSM_DECODER 0 ++#define CONFIG_GSM_MS_DECODER 0 ++#define CONFIG_HCA_DECODER 0 ++#define CONFIG_HCOM_DECODER 0 
++#define CONFIG_HDR_DECODER 0 ++#define CONFIG_IAC_DECODER 0 ++#define CONFIG_ILBC_DECODER 0 ++#define CONFIG_IMC_DECODER 0 ++#define CONFIG_INTERPLAY_ACM_DECODER 0 ++#define CONFIG_MACE3_DECODER 0 ++#define CONFIG_MACE6_DECODER 0 ++#define CONFIG_METASOUND_DECODER 0 ++#define CONFIG_MISC4_DECODER 0 ++#define CONFIG_MLP_DECODER 0 ++#define CONFIG_MP1_DECODER 0 ++#define CONFIG_MP1FLOAT_DECODER 0 ++#define CONFIG_MP2_DECODER 0 ++#define CONFIG_MP2FLOAT_DECODER 0 ++#define CONFIG_MP3FLOAT_DECODER 0 ++#define CONFIG_MP3_DECODER 1 ++#define CONFIG_MP3ADUFLOAT_DECODER 0 ++#define CONFIG_MP3ADU_DECODER 0 ++#define CONFIG_MP3ON4FLOAT_DECODER 0 ++#define CONFIG_MP3ON4_DECODER 0 ++#define CONFIG_MPC7_DECODER 0 ++#define CONFIG_MPC8_DECODER 0 ++#define CONFIG_MSNSIREN_DECODER 0 ++#define CONFIG_NELLYMOSER_DECODER 0 ++#define CONFIG_ON2AVC_DECODER 0 ++#define CONFIG_OPUS_DECODER 0 ++#define CONFIG_OSQ_DECODER 0 ++#define CONFIG_PAF_AUDIO_DECODER 0 ++#define CONFIG_QCELP_DECODER 0 ++#define CONFIG_QDM2_DECODER 0 ++#define CONFIG_QDMC_DECODER 0 ++#define CONFIG_RA_144_DECODER 0 ++#define CONFIG_RA_288_DECODER 0 ++#define CONFIG_RALF_DECODER 0 ++#define CONFIG_SBC_DECODER 0 ++#define CONFIG_SHORTEN_DECODER 0 ++#define CONFIG_SIPR_DECODER 0 ++#define CONFIG_SIREN_DECODER 0 ++#define CONFIG_SMACKAUD_DECODER 0 ++#define CONFIG_SONIC_DECODER 0 ++#define CONFIG_TAK_DECODER 0 ++#define CONFIG_TRUEHD_DECODER 0 ++#define CONFIG_TRUESPEECH_DECODER 0 ++#define CONFIG_TTA_DECODER 0 ++#define CONFIG_TWINVQ_DECODER 0 ++#define CONFIG_VMDAUDIO_DECODER 0 ++#define CONFIG_VORBIS_DECODER 1 ++#define CONFIG_WAVARC_DECODER 0 ++#define CONFIG_WAVPACK_DECODER 0 ++#define CONFIG_WMALOSSLESS_DECODER 0 ++#define CONFIG_WMAPRO_DECODER 0 ++#define CONFIG_WMAV1_DECODER 0 ++#define CONFIG_WMAV2_DECODER 0 ++#define CONFIG_WMAVOICE_DECODER 0 ++#define CONFIG_WS_SND1_DECODER 0 ++#define CONFIG_XMA1_DECODER 0 ++#define CONFIG_XMA2_DECODER 0 ++#define CONFIG_PCM_ALAW_DECODER 1 ++#define 
CONFIG_PCM_BLURAY_DECODER 0 ++#define CONFIG_PCM_DVD_DECODER 0 ++#define CONFIG_PCM_F16LE_DECODER 0 ++#define CONFIG_PCM_F24LE_DECODER 0 ++#define CONFIG_PCM_F32BE_DECODER 0 ++#define CONFIG_PCM_F32LE_DECODER 1 ++#define CONFIG_PCM_F64BE_DECODER 0 ++#define CONFIG_PCM_F64LE_DECODER 0 ++#define CONFIG_PCM_LXF_DECODER 0 ++#define CONFIG_PCM_MULAW_DECODER 1 ++#define CONFIG_PCM_S8_DECODER 0 ++#define CONFIG_PCM_S8_PLANAR_DECODER 0 ++#define CONFIG_PCM_S16BE_DECODER 1 ++#define CONFIG_PCM_S16BE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S16LE_DECODER 1 ++#define CONFIG_PCM_S16LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S24BE_DECODER 1 ++#define CONFIG_PCM_S24DAUD_DECODER 0 ++#define CONFIG_PCM_S24LE_DECODER 1 ++#define CONFIG_PCM_S24LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S32BE_DECODER 0 ++#define CONFIG_PCM_S32LE_DECODER 1 ++#define CONFIG_PCM_S32LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S64BE_DECODER 0 ++#define CONFIG_PCM_S64LE_DECODER 0 ++#define CONFIG_PCM_SGA_DECODER 0 ++#define CONFIG_PCM_U8_DECODER 1 ++#define CONFIG_PCM_U16BE_DECODER 0 ++#define CONFIG_PCM_U16LE_DECODER 0 ++#define CONFIG_PCM_U24BE_DECODER 0 ++#define CONFIG_PCM_U24LE_DECODER 0 ++#define CONFIG_PCM_U32BE_DECODER 0 ++#define CONFIG_PCM_U32LE_DECODER 0 ++#define CONFIG_PCM_VIDC_DECODER 0 ++#define CONFIG_CBD2_DPCM_DECODER 0 ++#define CONFIG_DERF_DPCM_DECODER 0 ++#define CONFIG_GREMLIN_DPCM_DECODER 0 ++#define CONFIG_INTERPLAY_DPCM_DECODER 0 ++#define CONFIG_ROQ_DPCM_DECODER 0 ++#define CONFIG_SDX2_DPCM_DECODER 0 ++#define CONFIG_SOL_DPCM_DECODER 0 ++#define CONFIG_XAN_DPCM_DECODER 0 ++#define CONFIG_WADY_DPCM_DECODER 0 ++#define CONFIG_ADPCM_4XM_DECODER 0 ++#define CONFIG_ADPCM_ADX_DECODER 0 ++#define CONFIG_ADPCM_AFC_DECODER 0 ++#define CONFIG_ADPCM_AGM_DECODER 0 ++#define CONFIG_ADPCM_AICA_DECODER 0 ++#define CONFIG_ADPCM_ARGO_DECODER 0 ++#define CONFIG_ADPCM_CT_DECODER 0 ++#define CONFIG_ADPCM_DTK_DECODER 0 ++#define CONFIG_ADPCM_EA_DECODER 0 ++#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0 ++#define 
CONFIG_ADPCM_EA_R1_DECODER 0 ++#define CONFIG_ADPCM_EA_R2_DECODER 0 ++#define CONFIG_ADPCM_EA_R3_DECODER 0 ++#define CONFIG_ADPCM_EA_XAS_DECODER 0 ++#define CONFIG_ADPCM_G722_DECODER 0 ++#define CONFIG_ADPCM_G726_DECODER 0 ++#define CONFIG_ADPCM_G726LE_DECODER 0 ++#define CONFIG_ADPCM_IMA_ACORN_DECODER 0 ++#define CONFIG_ADPCM_IMA_AMV_DECODER 0 ++#define CONFIG_ADPCM_IMA_ALP_DECODER 0 ++#define CONFIG_ADPCM_IMA_APC_DECODER 0 ++#define CONFIG_ADPCM_IMA_APM_DECODER 0 ++#define CONFIG_ADPCM_IMA_CUNNING_DECODER 0 ++#define CONFIG_ADPCM_IMA_DAT4_DECODER 0 ++#define CONFIG_ADPCM_IMA_DK3_DECODER 0 ++#define CONFIG_ADPCM_IMA_DK4_DECODER 0 ++#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0 ++#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0 ++#define CONFIG_ADPCM_IMA_ISS_DECODER 0 ++#define CONFIG_ADPCM_IMA_MOFLEX_DECODER 0 ++#define CONFIG_ADPCM_IMA_MTF_DECODER 0 ++#define CONFIG_ADPCM_IMA_OKI_DECODER 0 ++#define CONFIG_ADPCM_IMA_QT_DECODER 0 ++#define CONFIG_ADPCM_IMA_RAD_DECODER 0 ++#define CONFIG_ADPCM_IMA_SSI_DECODER 0 ++#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0 ++#define CONFIG_ADPCM_IMA_WAV_DECODER 0 ++#define CONFIG_ADPCM_IMA_WS_DECODER 0 ++#define CONFIG_ADPCM_MS_DECODER 0 ++#define CONFIG_ADPCM_MTAF_DECODER 0 ++#define CONFIG_ADPCM_PSX_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_2_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_3_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_4_DECODER 0 ++#define CONFIG_ADPCM_SWF_DECODER 0 ++#define CONFIG_ADPCM_THP_DECODER 0 ++#define CONFIG_ADPCM_THP_LE_DECODER 0 ++#define CONFIG_ADPCM_VIMA_DECODER 0 ++#define CONFIG_ADPCM_XA_DECODER 0 ++#define CONFIG_ADPCM_XMD_DECODER 0 ++#define CONFIG_ADPCM_YAMAHA_DECODER 0 ++#define CONFIG_ADPCM_ZORK_DECODER 0 ++#define CONFIG_SSA_DECODER 0 ++#define CONFIG_ASS_DECODER 0 ++#define CONFIG_CCAPTION_DECODER 0 ++#define CONFIG_DVBSUB_DECODER 0 ++#define CONFIG_DVDSUB_DECODER 0 ++#define CONFIG_JACOSUB_DECODER 0 ++#define CONFIG_MICRODVD_DECODER 0 ++#define CONFIG_MOVTEXT_DECODER 0 ++#define CONFIG_MPL2_DECODER 0 ++#define 
CONFIG_PGSSUB_DECODER 0 ++#define CONFIG_PJS_DECODER 0 ++#define CONFIG_REALTEXT_DECODER 0 ++#define CONFIG_SAMI_DECODER 0 ++#define CONFIG_SRT_DECODER 0 ++#define CONFIG_STL_DECODER 0 ++#define CONFIG_SUBRIP_DECODER 0 ++#define CONFIG_SUBVIEWER_DECODER 0 ++#define CONFIG_SUBVIEWER1_DECODER 0 ++#define CONFIG_TEXT_DECODER 0 ++#define CONFIG_VPLAYER_DECODER 0 ++#define CONFIG_WEBVTT_DECODER 0 ++#define CONFIG_XSUB_DECODER 0 ++#define CONFIG_AAC_AT_DECODER 0 ++#define CONFIG_AC3_AT_DECODER 0 ++#define CONFIG_ADPCM_IMA_QT_AT_DECODER 0 ++#define CONFIG_ALAC_AT_DECODER 0 ++#define CONFIG_AMR_NB_AT_DECODER 0 ++#define CONFIG_EAC3_AT_DECODER 0 ++#define CONFIG_GSM_MS_AT_DECODER 0 ++#define CONFIG_ILBC_AT_DECODER 0 ++#define CONFIG_MP1_AT_DECODER 0 ++#define CONFIG_MP2_AT_DECODER 0 ++#define CONFIG_MP3_AT_DECODER 0 ++#define CONFIG_PCM_ALAW_AT_DECODER 0 ++#define CONFIG_PCM_MULAW_AT_DECODER 0 ++#define CONFIG_QDMC_AT_DECODER 0 ++#define CONFIG_QDM2_AT_DECODER 0 ++#define CONFIG_LIBARIBCAPTION_DECODER 0 ++#define CONFIG_LIBARIBB24_DECODER 0 ++#define CONFIG_LIBCELT_DECODER 0 ++#define CONFIG_LIBCODEC2_DECODER 0 ++#define CONFIG_LIBDAV1D_DECODER 0 ++#define CONFIG_LIBDAVS2_DECODER 0 ++#define CONFIG_LIBFDK_AAC_DECODER 0 ++#define CONFIG_LIBGSM_DECODER 0 ++#define CONFIG_LIBGSM_MS_DECODER 0 ++#define CONFIG_LIBILBC_DECODER 0 ++#define CONFIG_LIBJXL_DECODER 0 ++#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0 ++#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0 ++#define CONFIG_LIBOPUS_DECODER 1 ++#define CONFIG_LIBRSVG_DECODER 0 ++#define CONFIG_LIBSPEEX_DECODER 0 ++#define CONFIG_LIBUAVS3D_DECODER 0 ++#define CONFIG_LIBVORBIS_DECODER 0 ++#define CONFIG_LIBVPX_VP8_DECODER 0 ++#define CONFIG_LIBVPX_VP9_DECODER 0 ++#define CONFIG_LIBZVBI_TELETEXT_DECODER 0 ++#define CONFIG_BINTEXT_DECODER 0 ++#define CONFIG_XBIN_DECODER 0 ++#define CONFIG_IDF_DECODER 0 ++#define CONFIG_LIBAOM_AV1_DECODER 0 ++#define CONFIG_AV1_DECODER 0 ++#define CONFIG_AV1_CUVID_DECODER 0 ++#define 
CONFIG_AV1_MEDIACODEC_DECODER 0 ++#define CONFIG_AV1_QSV_DECODER 0 ++#define CONFIG_LIBOPENH264_DECODER 0 ++#define CONFIG_H264_CUVID_DECODER 0 ++#define CONFIG_HEVC_CUVID_DECODER 0 ++#define CONFIG_HEVC_MEDIACODEC_DECODER 0 ++#define CONFIG_MJPEG_CUVID_DECODER 0 ++#define CONFIG_MJPEG_QSV_DECODER 0 ++#define CONFIG_MPEG1_CUVID_DECODER 0 ++#define CONFIG_MPEG2_CUVID_DECODER 0 ++#define CONFIG_MPEG4_CUVID_DECODER 0 ++#define CONFIG_MPEG4_MEDIACODEC_DECODER 0 ++#define CONFIG_VC1_CUVID_DECODER 0 ++#define CONFIG_VP8_CUVID_DECODER 0 ++#define CONFIG_VP8_MEDIACODEC_DECODER 0 ++#define CONFIG_VP8_QSV_DECODER 0 ++#define CONFIG_VP9_CUVID_DECODER 0 ++#define CONFIG_VP9_MEDIACODEC_DECODER 0 ++#define CONFIG_VP9_QSV_DECODER 0 ++#define CONFIG_VNULL_DECODER 0 ++#define CONFIG_ANULL_DECODER 0 ++#define CONFIG_A64MULTI_ENCODER 0 ++#define CONFIG_A64MULTI5_ENCODER 0 ++#define CONFIG_ALIAS_PIX_ENCODER 0 ++#define CONFIG_AMV_ENCODER 0 ++#define CONFIG_APNG_ENCODER 0 ++#define CONFIG_ASV1_ENCODER 0 ++#define CONFIG_ASV2_ENCODER 0 ++#define CONFIG_AVRP_ENCODER 0 ++#define CONFIG_AVUI_ENCODER 0 ++#define CONFIG_AYUV_ENCODER 0 ++#define CONFIG_BITPACKED_ENCODER 0 ++#define CONFIG_BMP_ENCODER 0 ++#define CONFIG_CFHD_ENCODER 0 ++#define CONFIG_CINEPAK_ENCODER 0 ++#define CONFIG_CLJR_ENCODER 0 ++#define CONFIG_COMFORTNOISE_ENCODER 0 ++#define CONFIG_DNXHD_ENCODER 0 ++#define CONFIG_DPX_ENCODER 0 ++#define CONFIG_DVVIDEO_ENCODER 0 ++#define CONFIG_EXR_ENCODER 0 ++#define CONFIG_FFV1_ENCODER 0 ++#define CONFIG_FFVHUFF_ENCODER 0 ++#define CONFIG_FITS_ENCODER 0 ++#define CONFIG_FLASHSV_ENCODER 0 ++#define CONFIG_FLASHSV2_ENCODER 0 ++#define CONFIG_FLV_ENCODER 0 ++#define CONFIG_GIF_ENCODER 0 ++#define CONFIG_H261_ENCODER 0 ++#define CONFIG_H263_ENCODER 0 ++#define CONFIG_H263P_ENCODER 0 ++#define CONFIG_H264_MEDIACODEC_ENCODER 0 ++#define CONFIG_HAP_ENCODER 0 ++#define CONFIG_HUFFYUV_ENCODER 0 ++#define CONFIG_JPEG2000_ENCODER 0 ++#define CONFIG_JPEGLS_ENCODER 0 ++#define 
CONFIG_LJPEG_ENCODER 0 ++#define CONFIG_MAGICYUV_ENCODER 0 ++#define CONFIG_MJPEG_ENCODER 0 ++#define CONFIG_MPEG1VIDEO_ENCODER 0 ++#define CONFIG_MPEG2VIDEO_ENCODER 0 ++#define CONFIG_MPEG4_ENCODER 0 ++#define CONFIG_MSMPEG4V2_ENCODER 0 ++#define CONFIG_MSMPEG4V3_ENCODER 0 ++#define CONFIG_MSRLE_ENCODER 0 ++#define CONFIG_MSVIDEO1_ENCODER 0 ++#define CONFIG_PAM_ENCODER 0 ++#define CONFIG_PBM_ENCODER 0 ++#define CONFIG_PCX_ENCODER 0 ++#define CONFIG_PFM_ENCODER 0 ++#define CONFIG_PGM_ENCODER 0 ++#define CONFIG_PGMYUV_ENCODER 0 ++#define CONFIG_PHM_ENCODER 0 ++#define CONFIG_PNG_ENCODER 0 ++#define CONFIG_PPM_ENCODER 0 ++#define CONFIG_PRORES_ENCODER 0 ++#define CONFIG_PRORES_AW_ENCODER 0 ++#define CONFIG_PRORES_KS_ENCODER 0 ++#define CONFIG_QOI_ENCODER 0 ++#define CONFIG_QTRLE_ENCODER 0 ++#define CONFIG_R10K_ENCODER 0 ++#define CONFIG_R210_ENCODER 0 ++#define CONFIG_RAWVIDEO_ENCODER 0 ++#define CONFIG_ROQ_ENCODER 0 ++#define CONFIG_RPZA_ENCODER 0 ++#define CONFIG_RV10_ENCODER 0 ++#define CONFIG_RV20_ENCODER 0 ++#define CONFIG_S302M_ENCODER 0 ++#define CONFIG_SGI_ENCODER 0 ++#define CONFIG_SMC_ENCODER 0 ++#define CONFIG_SNOW_ENCODER 0 ++#define CONFIG_SPEEDHQ_ENCODER 0 ++#define CONFIG_SUNRAST_ENCODER 0 ++#define CONFIG_SVQ1_ENCODER 0 ++#define CONFIG_TARGA_ENCODER 0 ++#define CONFIG_TIFF_ENCODER 0 ++#define CONFIG_UTVIDEO_ENCODER 0 ++#define CONFIG_V210_ENCODER 0 ++#define CONFIG_V308_ENCODER 0 ++#define CONFIG_V408_ENCODER 0 ++#define CONFIG_V410_ENCODER 0 ++#define CONFIG_VBN_ENCODER 0 ++#define CONFIG_VC2_ENCODER 0 ++#define CONFIG_WBMP_ENCODER 0 ++#define CONFIG_WRAPPED_AVFRAME_ENCODER 0 ++#define CONFIG_WMV1_ENCODER 0 ++#define CONFIG_WMV2_ENCODER 0 ++#define CONFIG_XBM_ENCODER 0 ++#define CONFIG_XFACE_ENCODER 0 ++#define CONFIG_XWD_ENCODER 0 ++#define CONFIG_Y41P_ENCODER 0 ++#define CONFIG_YUV4_ENCODER 0 ++#define CONFIG_ZLIB_ENCODER 0 ++#define CONFIG_ZMBV_ENCODER 0 ++#define CONFIG_AAC_ENCODER 0 ++#define CONFIG_AC3_ENCODER 0 ++#define 
CONFIG_AC3_FIXED_ENCODER 0 ++#define CONFIG_ALAC_ENCODER 0 ++#define CONFIG_APTX_ENCODER 0 ++#define CONFIG_APTX_HD_ENCODER 0 ++#define CONFIG_DCA_ENCODER 0 ++#define CONFIG_DFPWM_ENCODER 0 ++#define CONFIG_EAC3_ENCODER 0 ++#define CONFIG_FLAC_ENCODER 0 ++#define CONFIG_G723_1_ENCODER 0 ++#define CONFIG_HDR_ENCODER 0 ++#define CONFIG_MLP_ENCODER 0 ++#define CONFIG_MP2_ENCODER 0 ++#define CONFIG_MP2FIXED_ENCODER 0 ++#define CONFIG_NELLYMOSER_ENCODER 0 ++#define CONFIG_OPUS_ENCODER 0 ++#define CONFIG_RA_144_ENCODER 0 ++#define CONFIG_SBC_ENCODER 0 ++#define CONFIG_SONIC_ENCODER 0 ++#define CONFIG_SONIC_LS_ENCODER 0 ++#define CONFIG_TRUEHD_ENCODER 0 ++#define CONFIG_TTA_ENCODER 0 ++#define CONFIG_VORBIS_ENCODER 0 ++#define CONFIG_WAVPACK_ENCODER 0 ++#define CONFIG_WMAV1_ENCODER 0 ++#define CONFIG_WMAV2_ENCODER 0 ++#define CONFIG_PCM_ALAW_ENCODER 0 ++#define CONFIG_PCM_BLURAY_ENCODER 0 ++#define CONFIG_PCM_DVD_ENCODER 0 ++#define CONFIG_PCM_F32BE_ENCODER 0 ++#define CONFIG_PCM_F32LE_ENCODER 0 ++#define CONFIG_PCM_F64BE_ENCODER 0 ++#define CONFIG_PCM_F64LE_ENCODER 0 ++#define CONFIG_PCM_MULAW_ENCODER 0 ++#define CONFIG_PCM_S8_ENCODER 0 ++#define CONFIG_PCM_S8_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S16BE_ENCODER 0 ++#define CONFIG_PCM_S16BE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S16LE_ENCODER 0 ++#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S24BE_ENCODER 0 ++#define CONFIG_PCM_S24DAUD_ENCODER 0 ++#define CONFIG_PCM_S24LE_ENCODER 0 ++#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S32BE_ENCODER 0 ++#define CONFIG_PCM_S32LE_ENCODER 0 ++#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S64BE_ENCODER 0 ++#define CONFIG_PCM_S64LE_ENCODER 0 ++#define CONFIG_PCM_U8_ENCODER 0 ++#define CONFIG_PCM_U16BE_ENCODER 0 ++#define CONFIG_PCM_U16LE_ENCODER 0 ++#define CONFIG_PCM_U24BE_ENCODER 0 ++#define CONFIG_PCM_U24LE_ENCODER 0 ++#define CONFIG_PCM_U32BE_ENCODER 0 ++#define CONFIG_PCM_U32LE_ENCODER 0 ++#define CONFIG_PCM_VIDC_ENCODER 0 
++#define CONFIG_ROQ_DPCM_ENCODER 0 ++#define CONFIG_ADPCM_ADX_ENCODER 0 ++#define CONFIG_ADPCM_ARGO_ENCODER 0 ++#define CONFIG_ADPCM_G722_ENCODER 0 ++#define CONFIG_ADPCM_G726_ENCODER 0 ++#define CONFIG_ADPCM_G726LE_ENCODER 0 ++#define CONFIG_ADPCM_IMA_AMV_ENCODER 0 ++#define CONFIG_ADPCM_IMA_ALP_ENCODER 0 ++#define CONFIG_ADPCM_IMA_APM_ENCODER 0 ++#define CONFIG_ADPCM_IMA_QT_ENCODER 0 ++#define CONFIG_ADPCM_IMA_SSI_ENCODER 0 ++#define CONFIG_ADPCM_IMA_WAV_ENCODER 0 ++#define CONFIG_ADPCM_IMA_WS_ENCODER 0 ++#define CONFIG_ADPCM_MS_ENCODER 0 ++#define CONFIG_ADPCM_SWF_ENCODER 0 ++#define CONFIG_ADPCM_YAMAHA_ENCODER 0 ++#define CONFIG_SSA_ENCODER 0 ++#define CONFIG_ASS_ENCODER 0 ++#define CONFIG_DVBSUB_ENCODER 0 ++#define CONFIG_DVDSUB_ENCODER 0 ++#define CONFIG_MOVTEXT_ENCODER 0 ++#define CONFIG_SRT_ENCODER 0 ++#define CONFIG_SUBRIP_ENCODER 0 ++#define CONFIG_TEXT_ENCODER 0 ++#define CONFIG_TTML_ENCODER 0 ++#define CONFIG_WEBVTT_ENCODER 0 ++#define CONFIG_XSUB_ENCODER 0 ++#define CONFIG_AAC_AT_ENCODER 0 ++#define CONFIG_ALAC_AT_ENCODER 0 ++#define CONFIG_ILBC_AT_ENCODER 0 ++#define CONFIG_PCM_ALAW_AT_ENCODER 0 ++#define CONFIG_PCM_MULAW_AT_ENCODER 0 ++#define CONFIG_LIBAOM_AV1_ENCODER 0 ++#define CONFIG_LIBCODEC2_ENCODER 0 ++#define CONFIG_LIBFDK_AAC_ENCODER 0 ++#define CONFIG_LIBGSM_ENCODER 0 ++#define CONFIG_LIBGSM_MS_ENCODER 0 ++#define CONFIG_LIBILBC_ENCODER 0 ++#define CONFIG_LIBJXL_ENCODER 0 ++#define CONFIG_LIBMP3LAME_ENCODER 0 ++#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0 ++#define CONFIG_LIBOPENJPEG_ENCODER 0 ++#define CONFIG_LIBOPUS_ENCODER 0 ++#define CONFIG_LIBRAV1E_ENCODER 0 ++#define CONFIG_LIBSHINE_ENCODER 0 ++#define CONFIG_LIBSPEEX_ENCODER 0 ++#define CONFIG_LIBSVTAV1_ENCODER 0 ++#define CONFIG_LIBTHEORA_ENCODER 0 ++#define CONFIG_LIBTWOLAME_ENCODER 0 ++#define CONFIG_LIBVO_AMRWBENC_ENCODER 0 ++#define CONFIG_LIBVORBIS_ENCODER 0 ++#define CONFIG_LIBVPX_VP8_ENCODER 0 ++#define CONFIG_LIBVPX_VP9_ENCODER 0 ++#define CONFIG_LIBWEBP_ANIM_ENCODER 0 
++#define CONFIG_LIBWEBP_ENCODER 0 ++#define CONFIG_LIBX262_ENCODER 0 ++#define CONFIG_LIBX264_ENCODER 0 ++#define CONFIG_LIBX264RGB_ENCODER 0 ++#define CONFIG_LIBX265_ENCODER 0 ++#define CONFIG_LIBXAVS_ENCODER 0 ++#define CONFIG_LIBXAVS2_ENCODER 0 ++#define CONFIG_LIBXVID_ENCODER 0 ++#define CONFIG_AAC_MF_ENCODER 0 ++#define CONFIG_AC3_MF_ENCODER 0 ++#define CONFIG_H263_V4L2M2M_ENCODER 0 ++#define CONFIG_AV1_MEDIACODEC_ENCODER 0 ++#define CONFIG_AV1_NVENC_ENCODER 0 ++#define CONFIG_AV1_QSV_ENCODER 0 ++#define CONFIG_AV1_AMF_ENCODER 0 ++#define CONFIG_AV1_VAAPI_ENCODER 0 ++#define CONFIG_LIBOPENH264_ENCODER 0 ++#define CONFIG_H264_AMF_ENCODER 0 ++#define CONFIG_H264_MF_ENCODER 0 ++#define CONFIG_H264_NVENC_ENCODER 0 ++#define CONFIG_H264_OMX_ENCODER 0 ++#define CONFIG_H264_QSV_ENCODER 0 ++#define CONFIG_H264_V4L2M2M_ENCODER 0 ++#define CONFIG_H264_VAAPI_ENCODER 0 ++#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_HEVC_AMF_ENCODER 0 ++#define CONFIG_HEVC_MEDIACODEC_ENCODER 0 ++#define CONFIG_HEVC_MF_ENCODER 0 ++#define CONFIG_HEVC_NVENC_ENCODER 0 ++#define CONFIG_HEVC_QSV_ENCODER 0 ++#define CONFIG_HEVC_V4L2M2M_ENCODER 0 ++#define CONFIG_HEVC_VAAPI_ENCODER 0 ++#define CONFIG_HEVC_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_LIBKVAZAAR_ENCODER 0 ++#define CONFIG_MJPEG_QSV_ENCODER 0 ++#define CONFIG_MJPEG_VAAPI_ENCODER 0 ++#define CONFIG_MP3_MF_ENCODER 0 ++#define CONFIG_MPEG2_QSV_ENCODER 0 ++#define CONFIG_MPEG2_VAAPI_ENCODER 0 ++#define CONFIG_MPEG4_MEDIACODEC_ENCODER 0 ++#define CONFIG_MPEG4_OMX_ENCODER 0 ++#define CONFIG_MPEG4_V4L2M2M_ENCODER 0 ++#define CONFIG_PRORES_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_VP8_MEDIACODEC_ENCODER 0 ++#define CONFIG_VP8_V4L2M2M_ENCODER 0 ++#define CONFIG_VP8_VAAPI_ENCODER 0 ++#define CONFIG_VP9_MEDIACODEC_ENCODER 0 ++#define CONFIG_VP9_VAAPI_ENCODER 0 ++#define CONFIG_VP9_QSV_ENCODER 0 ++#define CONFIG_VNULL_ENCODER 0 ++#define CONFIG_ANULL_ENCODER 0 ++#define CONFIG_AV1_D3D11VA_HWACCEL 0 ++#define CONFIG_AV1_D3D11VA2_HWACCEL 
0 ++#define CONFIG_AV1_DXVA2_HWACCEL 0 ++#define CONFIG_AV1_NVDEC_HWACCEL 0 ++#define CONFIG_AV1_VAAPI_HWACCEL 0 ++#define CONFIG_AV1_VDPAU_HWACCEL 0 ++#define CONFIG_AV1_VULKAN_HWACCEL 0 ++#define CONFIG_H263_VAAPI_HWACCEL 0 ++#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_H264_D3D11VA_HWACCEL 0 ++#define CONFIG_H264_D3D11VA2_HWACCEL 0 ++#define CONFIG_H264_DXVA2_HWACCEL 0 ++#define CONFIG_H264_NVDEC_HWACCEL 0 ++#define CONFIG_H264_VAAPI_HWACCEL 0 ++#define CONFIG_H264_VDPAU_HWACCEL 0 ++#define CONFIG_H264_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_H264_VULKAN_HWACCEL 0 ++#define CONFIG_HEVC_D3D11VA_HWACCEL 0 ++#define CONFIG_HEVC_D3D11VA2_HWACCEL 0 ++#define CONFIG_HEVC_DXVA2_HWACCEL 0 ++#define CONFIG_HEVC_NVDEC_HWACCEL 0 ++#define CONFIG_HEVC_VAAPI_HWACCEL 0 ++#define CONFIG_HEVC_VDPAU_HWACCEL 0 ++#define CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_HEVC_VULKAN_HWACCEL 0 ++#define CONFIG_MJPEG_NVDEC_HWACCEL 0 ++#define CONFIG_MJPEG_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG1_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG1_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MPEG2_D3D11VA_HWACCEL 0 ++#define CONFIG_MPEG2_D3D11VA2_HWACCEL 0 ++#define CONFIG_MPEG2_DXVA2_HWACCEL 0 ++#define CONFIG_MPEG2_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG2_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG2_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MPEG4_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG4_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG4_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_PRORES_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_VC1_D3D11VA_HWACCEL 0 ++#define CONFIG_VC1_D3D11VA2_HWACCEL 0 ++#define CONFIG_VC1_DXVA2_HWACCEL 0 ++#define CONFIG_VC1_NVDEC_HWACCEL 0 ++#define CONFIG_VC1_VAAPI_HWACCEL 0 ++#define CONFIG_VC1_VDPAU_HWACCEL 0 ++#define CONFIG_VP8_NVDEC_HWACCEL 0 ++#define CONFIG_VP8_VAAPI_HWACCEL 0 ++#define CONFIG_VP9_D3D11VA_HWACCEL 0 ++#define CONFIG_VP9_D3D11VA2_HWACCEL 0 ++#define 
CONFIG_VP9_DXVA2_HWACCEL 0 ++#define CONFIG_VP9_NVDEC_HWACCEL 0 ++#define CONFIG_VP9_VAAPI_HWACCEL 0 ++#define CONFIG_VP9_VDPAU_HWACCEL 0 ++#define CONFIG_VP9_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_WMV3_D3D11VA_HWACCEL 0 ++#define CONFIG_WMV3_D3D11VA2_HWACCEL 0 ++#define CONFIG_WMV3_DXVA2_HWACCEL 0 ++#define CONFIG_WMV3_NVDEC_HWACCEL 0 ++#define CONFIG_WMV3_VAAPI_HWACCEL 0 ++#define CONFIG_WMV3_VDPAU_HWACCEL 0 ++#define CONFIG_AAC_PARSER 1 ++#define CONFIG_AAC_LATM_PARSER 0 ++#define CONFIG_AC3_PARSER 0 ++#define CONFIG_ADX_PARSER 0 ++#define CONFIG_AMR_PARSER 0 ++#define CONFIG_AV1_PARSER 0 ++#define CONFIG_AVS2_PARSER 0 ++#define CONFIG_AVS3_PARSER 0 ++#define CONFIG_BMP_PARSER 0 ++#define CONFIG_CAVSVIDEO_PARSER 0 ++#define CONFIG_COOK_PARSER 0 ++#define CONFIG_CRI_PARSER 0 ++#define CONFIG_DCA_PARSER 0 ++#define CONFIG_DIRAC_PARSER 0 ++#define CONFIG_DNXHD_PARSER 0 ++#define CONFIG_DOLBY_E_PARSER 0 ++#define CONFIG_DPX_PARSER 0 ++#define CONFIG_DVAUDIO_PARSER 0 ++#define CONFIG_DVBSUB_PARSER 0 ++#define CONFIG_DVDSUB_PARSER 0 ++#define CONFIG_DVD_NAV_PARSER 0 ++#define CONFIG_EVC_PARSER 0 ++#define CONFIG_FLAC_PARSER 1 ++#define CONFIG_FTR_PARSER 0 ++#define CONFIG_G723_1_PARSER 0 ++#define CONFIG_G729_PARSER 0 ++#define CONFIG_GIF_PARSER 0 ++#define CONFIG_GSM_PARSER 0 ++#define CONFIG_H261_PARSER 0 ++#define CONFIG_H263_PARSER 1 ++#define CONFIG_H264_PARSER 1 ++#define CONFIG_HEVC_PARSER 0 ++#define CONFIG_HDR_PARSER 0 ++#define CONFIG_IPU_PARSER 0 ++#define CONFIG_JPEG2000_PARSER 0 ++#define CONFIG_JPEGXL_PARSER 0 ++#define CONFIG_MISC4_PARSER 0 ++#define CONFIG_MJPEG_PARSER 0 ++#define CONFIG_MLP_PARSER 0 ++#define CONFIG_MPEG4VIDEO_PARSER 1 ++#define CONFIG_MPEGAUDIO_PARSER 1 ++#define CONFIG_MPEGVIDEO_PARSER 0 ++#define CONFIG_OPUS_PARSER 1 ++#define CONFIG_PNG_PARSER 0 ++#define CONFIG_PNM_PARSER 0 ++#define CONFIG_QOI_PARSER 0 ++#define CONFIG_RV34_PARSER 0 ++#define CONFIG_SBC_PARSER 0 ++#define CONFIG_SIPR_PARSER 0 ++#define CONFIG_TAK_PARSER 0 
++#define CONFIG_VC1_PARSER 0 ++#define CONFIG_VORBIS_PARSER 1 ++#define CONFIG_VP3_PARSER 1 ++#define CONFIG_VP8_PARSER 1 ++#define CONFIG_VP9_PARSER 1 ++#define CONFIG_VVC_PARSER 0 ++#define CONFIG_WEBP_PARSER 0 ++#define CONFIG_XBM_PARSER 0 ++#define CONFIG_XMA_PARSER 0 ++#define CONFIG_XWD_PARSER 0 ++#define CONFIG_ALSA_INDEV 0 ++#define CONFIG_ANDROID_CAMERA_INDEV 0 ++#define CONFIG_AVFOUNDATION_INDEV 0 ++#define CONFIG_BKTR_INDEV 0 ++#define CONFIG_DECKLINK_INDEV 0 ++#define CONFIG_DSHOW_INDEV 0 ++#define CONFIG_FBDEV_INDEV 0 ++#define CONFIG_GDIGRAB_INDEV 0 ++#define CONFIG_IEC61883_INDEV 0 ++#define CONFIG_JACK_INDEV 0 ++#define CONFIG_KMSGRAB_INDEV 0 ++#define CONFIG_LAVFI_INDEV 0 ++#define CONFIG_OPENAL_INDEV 0 ++#define CONFIG_OSS_INDEV 0 ++#define CONFIG_PULSE_INDEV 0 ++#define CONFIG_SNDIO_INDEV 0 ++#define CONFIG_V4L2_INDEV 0 ++#define CONFIG_VFWCAP_INDEV 0 ++#define CONFIG_XCBGRAB_INDEV 0 ++#define CONFIG_LIBCDIO_INDEV 0 ++#define CONFIG_LIBDC1394_INDEV 0 ++#define CONFIG_ALSA_OUTDEV 0 ++#define CONFIG_AUDIOTOOLBOX_OUTDEV 0 ++#define CONFIG_CACA_OUTDEV 0 ++#define CONFIG_DECKLINK_OUTDEV 0 ++#define CONFIG_FBDEV_OUTDEV 0 ++#define CONFIG_OPENGL_OUTDEV 0 ++#define CONFIG_OSS_OUTDEV 0 ++#define CONFIG_PULSE_OUTDEV 0 ++#define CONFIG_SDL2_OUTDEV 0 ++#define CONFIG_SNDIO_OUTDEV 0 ++#define CONFIG_V4L2_OUTDEV 0 ++#define CONFIG_XV_OUTDEV 0 ++#define CONFIG_ABENCH_FILTER 0 ++#define CONFIG_ACOMPRESSOR_FILTER 0 ++#define CONFIG_ACONTRAST_FILTER 0 ++#define CONFIG_ACOPY_FILTER 0 ++#define CONFIG_ACUE_FILTER 0 ++#define CONFIG_ACROSSFADE_FILTER 0 ++#define CONFIG_ACROSSOVER_FILTER 0 ++#define CONFIG_ACRUSHER_FILTER 0 ++#define CONFIG_ADECLICK_FILTER 0 ++#define CONFIG_ADECLIP_FILTER 0 ++#define CONFIG_ADECORRELATE_FILTER 0 ++#define CONFIG_ADELAY_FILTER 0 ++#define CONFIG_ADENORM_FILTER 0 ++#define CONFIG_ADERIVATIVE_FILTER 0 ++#define CONFIG_ADRC_FILTER 0 ++#define CONFIG_ADYNAMICEQUALIZER_FILTER 0 ++#define CONFIG_ADYNAMICSMOOTH_FILTER 0 ++#define 
CONFIG_AECHO_FILTER 0 ++#define CONFIG_AEMPHASIS_FILTER 0 ++#define CONFIG_AEVAL_FILTER 0 ++#define CONFIG_AEXCITER_FILTER 0 ++#define CONFIG_AFADE_FILTER 0 ++#define CONFIG_AFFTDN_FILTER 0 ++#define CONFIG_AFFTFILT_FILTER 0 ++#define CONFIG_AFIR_FILTER 0 ++#define CONFIG_AFORMAT_FILTER 0 ++#define CONFIG_AFREQSHIFT_FILTER 0 ++#define CONFIG_AFWTDN_FILTER 0 ++#define CONFIG_AGATE_FILTER 0 ++#define CONFIG_AIIR_FILTER 0 ++#define CONFIG_AINTEGRAL_FILTER 0 ++#define CONFIG_AINTERLEAVE_FILTER 0 ++#define CONFIG_ALATENCY_FILTER 0 ++#define CONFIG_ALIMITER_FILTER 0 ++#define CONFIG_ALLPASS_FILTER 0 ++#define CONFIG_ALOOP_FILTER 0 ++#define CONFIG_AMERGE_FILTER 0 ++#define CONFIG_AMETADATA_FILTER 0 ++#define CONFIG_AMIX_FILTER 0 ++#define CONFIG_AMULTIPLY_FILTER 0 ++#define CONFIG_ANEQUALIZER_FILTER 0 ++#define CONFIG_ANLMDN_FILTER 0 ++#define CONFIG_ANLMF_FILTER 0 ++#define CONFIG_ANLMS_FILTER 0 ++#define CONFIG_ANULL_FILTER 0 ++#define CONFIG_APAD_FILTER 0 ++#define CONFIG_APERMS_FILTER 0 ++#define CONFIG_APHASER_FILTER 0 ++#define CONFIG_APHASESHIFT_FILTER 0 ++#define CONFIG_APSNR_FILTER 0 ++#define CONFIG_APSYCLIP_FILTER 0 ++#define CONFIG_APULSATOR_FILTER 0 ++#define CONFIG_AREALTIME_FILTER 0 ++#define CONFIG_ARESAMPLE_FILTER 0 ++#define CONFIG_AREVERSE_FILTER 0 ++#define CONFIG_ARLS_FILTER 0 ++#define CONFIG_ARNNDN_FILTER 0 ++#define CONFIG_ASDR_FILTER 0 ++#define CONFIG_ASEGMENT_FILTER 0 ++#define CONFIG_ASELECT_FILTER 0 ++#define CONFIG_ASENDCMD_FILTER 0 ++#define CONFIG_ASETNSAMPLES_FILTER 0 ++#define CONFIG_ASETPTS_FILTER 0 ++#define CONFIG_ASETRATE_FILTER 0 ++#define CONFIG_ASETTB_FILTER 0 ++#define CONFIG_ASHOWINFO_FILTER 0 ++#define CONFIG_ASIDEDATA_FILTER 0 ++#define CONFIG_ASISDR_FILTER 0 ++#define CONFIG_ASOFTCLIP_FILTER 0 ++#define CONFIG_ASPECTRALSTATS_FILTER 0 ++#define CONFIG_ASPLIT_FILTER 0 ++#define CONFIG_ASR_FILTER 0 ++#define CONFIG_ASTATS_FILTER 0 ++#define CONFIG_ASTREAMSELECT_FILTER 0 ++#define CONFIG_ASUBBOOST_FILTER 0 ++#define 
CONFIG_ASUBCUT_FILTER 0 ++#define CONFIG_ASUPERCUT_FILTER 0 ++#define CONFIG_ASUPERPASS_FILTER 0 ++#define CONFIG_ASUPERSTOP_FILTER 0 ++#define CONFIG_ATEMPO_FILTER 0 ++#define CONFIG_ATILT_FILTER 0 ++#define CONFIG_ATRIM_FILTER 0 ++#define CONFIG_AXCORRELATE_FILTER 0 ++#define CONFIG_AZMQ_FILTER 0 ++#define CONFIG_BANDPASS_FILTER 0 ++#define CONFIG_BANDREJECT_FILTER 0 ++#define CONFIG_BASS_FILTER 0 ++#define CONFIG_BIQUAD_FILTER 0 ++#define CONFIG_BS2B_FILTER 0 ++#define CONFIG_CHANNELMAP_FILTER 0 ++#define CONFIG_CHANNELSPLIT_FILTER 0 ++#define CONFIG_CHORUS_FILTER 0 ++#define CONFIG_COMPAND_FILTER 0 ++#define CONFIG_COMPENSATIONDELAY_FILTER 0 ++#define CONFIG_CROSSFEED_FILTER 0 ++#define CONFIG_CRYSTALIZER_FILTER 0 ++#define CONFIG_DCSHIFT_FILTER 0 ++#define CONFIG_DEESSER_FILTER 0 ++#define CONFIG_DIALOGUENHANCE_FILTER 0 ++#define CONFIG_DRMETER_FILTER 0 ++#define CONFIG_DYNAUDNORM_FILTER 0 ++#define CONFIG_EARWAX_FILTER 0 ++#define CONFIG_EBUR128_FILTER 0 ++#define CONFIG_EQUALIZER_FILTER 0 ++#define CONFIG_EXTRASTEREO_FILTER 0 ++#define CONFIG_FIREQUALIZER_FILTER 0 ++#define CONFIG_FLANGER_FILTER 0 ++#define CONFIG_HAAS_FILTER 0 ++#define CONFIG_HDCD_FILTER 0 ++#define CONFIG_HEADPHONE_FILTER 0 ++#define CONFIG_HIGHPASS_FILTER 0 ++#define CONFIG_HIGHSHELF_FILTER 0 ++#define CONFIG_JOIN_FILTER 0 ++#define CONFIG_LADSPA_FILTER 0 ++#define CONFIG_LOUDNORM_FILTER 0 ++#define CONFIG_LOWPASS_FILTER 0 ++#define CONFIG_LOWSHELF_FILTER 0 ++#define CONFIG_LV2_FILTER 0 ++#define CONFIG_MCOMPAND_FILTER 0 ++#define CONFIG_PAN_FILTER 0 ++#define CONFIG_REPLAYGAIN_FILTER 0 ++#define CONFIG_RUBBERBAND_FILTER 0 ++#define CONFIG_SIDECHAINCOMPRESS_FILTER 0 ++#define CONFIG_SIDECHAINGATE_FILTER 0 ++#define CONFIG_SILENCEDETECT_FILTER 0 ++#define CONFIG_SILENCEREMOVE_FILTER 0 ++#define CONFIG_SOFALIZER_FILTER 0 ++#define CONFIG_SPEECHNORM_FILTER 0 ++#define CONFIG_STEREOTOOLS_FILTER 0 ++#define CONFIG_STEREOWIDEN_FILTER 0 ++#define CONFIG_SUPEREQUALIZER_FILTER 0 ++#define 
CONFIG_SURROUND_FILTER 0 ++#define CONFIG_TILTSHELF_FILTER 0 ++#define CONFIG_TREBLE_FILTER 0 ++#define CONFIG_TREMOLO_FILTER 0 ++#define CONFIG_VIBRATO_FILTER 0 ++#define CONFIG_VIRTUALBASS_FILTER 0 ++#define CONFIG_VOLUME_FILTER 0 ++#define CONFIG_VOLUMEDETECT_FILTER 0 ++#define CONFIG_AEVALSRC_FILTER 0 ++#define CONFIG_AFDELAYSRC_FILTER 0 ++#define CONFIG_AFIREQSRC_FILTER 0 ++#define CONFIG_AFIRSRC_FILTER 0 ++#define CONFIG_ANOISESRC_FILTER 0 ++#define CONFIG_ANULLSRC_FILTER 0 ++#define CONFIG_FLITE_FILTER 0 ++#define CONFIG_HILBERT_FILTER 0 ++#define CONFIG_SINC_FILTER 0 ++#define CONFIG_SINE_FILTER 0 ++#define CONFIG_ANULLSINK_FILTER 0 ++#define CONFIG_ADDROI_FILTER 0 ++#define CONFIG_ALPHAEXTRACT_FILTER 0 ++#define CONFIG_ALPHAMERGE_FILTER 0 ++#define CONFIG_AMPLIFY_FILTER 0 ++#define CONFIG_ASS_FILTER 0 ++#define CONFIG_ATADENOISE_FILTER 0 ++#define CONFIG_AVGBLUR_FILTER 0 ++#define CONFIG_AVGBLUR_OPENCL_FILTER 0 ++#define CONFIG_AVGBLUR_VULKAN_FILTER 0 ++#define CONFIG_BACKGROUNDKEY_FILTER 0 ++#define CONFIG_BBOX_FILTER 0 ++#define CONFIG_BENCH_FILTER 0 ++#define CONFIG_BILATERAL_FILTER 0 ++#define CONFIG_BILATERAL_CUDA_FILTER 0 ++#define CONFIG_BITPLANENOISE_FILTER 0 ++#define CONFIG_BLACKDETECT_FILTER 0 ++#define CONFIG_BLACKFRAME_FILTER 0 ++#define CONFIG_BLEND_FILTER 0 ++#define CONFIG_BLEND_VULKAN_FILTER 0 ++#define CONFIG_BLOCKDETECT_FILTER 0 ++#define CONFIG_BLURDETECT_FILTER 0 ++#define CONFIG_BM3D_FILTER 0 ++#define CONFIG_BOXBLUR_FILTER 0 ++#define CONFIG_BOXBLUR_OPENCL_FILTER 0 ++#define CONFIG_BWDIF_FILTER 0 ++#define CONFIG_BWDIF_CUDA_FILTER 0 ++#define CONFIG_BWDIF_VULKAN_FILTER 0 ++#define CONFIG_CAS_FILTER 0 ++#define CONFIG_CCREPACK_FILTER 0 ++#define CONFIG_CHROMABER_VULKAN_FILTER 0 ++#define CONFIG_CHROMAHOLD_FILTER 0 ++#define CONFIG_CHROMAKEY_FILTER 0 ++#define CONFIG_CHROMAKEY_CUDA_FILTER 0 ++#define CONFIG_CHROMANR_FILTER 0 ++#define CONFIG_CHROMASHIFT_FILTER 0 ++#define CONFIG_CIESCOPE_FILTER 0 ++#define CONFIG_CODECVIEW_FILTER 0 
++#define CONFIG_COLORBALANCE_FILTER 0 ++#define CONFIG_COLORCHANNELMIXER_FILTER 0 ++#define CONFIG_COLORCONTRAST_FILTER 0 ++#define CONFIG_COLORCORRECT_FILTER 0 ++#define CONFIG_COLORIZE_FILTER 0 ++#define CONFIG_COLORKEY_FILTER 0 ++#define CONFIG_COLORKEY_OPENCL_FILTER 0 ++#define CONFIG_COLORHOLD_FILTER 0 ++#define CONFIG_COLORLEVELS_FILTER 0 ++#define CONFIG_COLORMAP_FILTER 0 ++#define CONFIG_COLORMATRIX_FILTER 0 ++#define CONFIG_COLORSPACE_FILTER 0 ++#define CONFIG_COLORSPACE_CUDA_FILTER 0 ++#define CONFIG_COLORTEMPERATURE_FILTER 0 ++#define CONFIG_CONVOLUTION_FILTER 0 ++#define CONFIG_CONVOLUTION_OPENCL_FILTER 0 ++#define CONFIG_CONVOLVE_FILTER 0 ++#define CONFIG_COPY_FILTER 0 ++#define CONFIG_COREIMAGE_FILTER 0 ++#define CONFIG_CORR_FILTER 0 ++#define CONFIG_COVER_RECT_FILTER 0 ++#define CONFIG_CROP_FILTER 0 ++#define CONFIG_CROPDETECT_FILTER 0 ++#define CONFIG_CUE_FILTER 0 ++#define CONFIG_CURVES_FILTER 0 ++#define CONFIG_DATASCOPE_FILTER 0 ++#define CONFIG_DBLUR_FILTER 0 ++#define CONFIG_DCTDNOIZ_FILTER 0 ++#define CONFIG_DEBAND_FILTER 0 ++#define CONFIG_DEBLOCK_FILTER 0 ++#define CONFIG_DECIMATE_FILTER 0 ++#define CONFIG_DECONVOLVE_FILTER 0 ++#define CONFIG_DEDOT_FILTER 0 ++#define CONFIG_DEFLATE_FILTER 0 ++#define CONFIG_DEFLICKER_FILTER 0 ++#define CONFIG_DEINTERLACE_QSV_FILTER 0 ++#define CONFIG_DEINTERLACE_VAAPI_FILTER 0 ++#define CONFIG_DEJUDDER_FILTER 0 ++#define CONFIG_DELOGO_FILTER 0 ++#define CONFIG_DENOISE_VAAPI_FILTER 0 ++#define CONFIG_DERAIN_FILTER 0 ++#define CONFIG_DESHAKE_FILTER 0 ++#define CONFIG_DESHAKE_OPENCL_FILTER 0 ++#define CONFIG_DESPILL_FILTER 0 ++#define CONFIG_DETELECINE_FILTER 0 ++#define CONFIG_DILATION_FILTER 0 ++#define CONFIG_DILATION_OPENCL_FILTER 0 ++#define CONFIG_DISPLACE_FILTER 0 ++#define CONFIG_DNN_CLASSIFY_FILTER 0 ++#define CONFIG_DNN_DETECT_FILTER 0 ++#define CONFIG_DNN_PROCESSING_FILTER 0 ++#define CONFIG_DOUBLEWEAVE_FILTER 0 ++#define CONFIG_DRAWBOX_FILTER 0 ++#define CONFIG_DRAWGRAPH_FILTER 0 ++#define 
CONFIG_DRAWGRID_FILTER 0 ++#define CONFIG_DRAWTEXT_FILTER 0 ++#define CONFIG_EDGEDETECT_FILTER 0 ++#define CONFIG_ELBG_FILTER 0 ++#define CONFIG_ENTROPY_FILTER 0 ++#define CONFIG_EPX_FILTER 0 ++#define CONFIG_EQ_FILTER 0 ++#define CONFIG_EROSION_FILTER 0 ++#define CONFIG_EROSION_OPENCL_FILTER 0 ++#define CONFIG_ESTDIF_FILTER 0 ++#define CONFIG_EXPOSURE_FILTER 0 ++#define CONFIG_EXTRACTPLANES_FILTER 0 ++#define CONFIG_FADE_FILTER 0 ++#define CONFIG_FEEDBACK_FILTER 0 ++#define CONFIG_FFTDNOIZ_FILTER 0 ++#define CONFIG_FFTFILT_FILTER 0 ++#define CONFIG_FIELD_FILTER 0 ++#define CONFIG_FIELDHINT_FILTER 0 ++#define CONFIG_FIELDMATCH_FILTER 0 ++#define CONFIG_FIELDORDER_FILTER 0 ++#define CONFIG_FILLBORDERS_FILTER 0 ++#define CONFIG_FIND_RECT_FILTER 0 ++#define CONFIG_FLIP_VULKAN_FILTER 0 ++#define CONFIG_FLOODFILL_FILTER 0 ++#define CONFIG_FORMAT_FILTER 0 ++#define CONFIG_FPS_FILTER 0 ++#define CONFIG_FRAMEPACK_FILTER 0 ++#define CONFIG_FRAMERATE_FILTER 0 ++#define CONFIG_FRAMESTEP_FILTER 0 ++#define CONFIG_FREEZEDETECT_FILTER 0 ++#define CONFIG_FREEZEFRAMES_FILTER 0 ++#define CONFIG_FREI0R_FILTER 0 ++#define CONFIG_FSPP_FILTER 0 ++#define CONFIG_GBLUR_FILTER 0 ++#define CONFIG_GBLUR_VULKAN_FILTER 0 ++#define CONFIG_GEQ_FILTER 0 ++#define CONFIG_GRADFUN_FILTER 0 ++#define CONFIG_GRAPHMONITOR_FILTER 0 ++#define CONFIG_GRAYWORLD_FILTER 0 ++#define CONFIG_GREYEDGE_FILTER 0 ++#define CONFIG_GUIDED_FILTER 0 ++#define CONFIG_HALDCLUT_FILTER 0 ++#define CONFIG_HFLIP_FILTER 0 ++#define CONFIG_HFLIP_VULKAN_FILTER 0 ++#define CONFIG_HISTEQ_FILTER 0 ++#define CONFIG_HISTOGRAM_FILTER 0 ++#define CONFIG_HQDN3D_FILTER 0 ++#define CONFIG_HQX_FILTER 0 ++#define CONFIG_HSTACK_FILTER 0 ++#define CONFIG_HSVHOLD_FILTER 0 ++#define CONFIG_HSVKEY_FILTER 0 ++#define CONFIG_HUE_FILTER 0 ++#define CONFIG_HUESATURATION_FILTER 0 ++#define CONFIG_HWDOWNLOAD_FILTER 0 ++#define CONFIG_HWMAP_FILTER 0 ++#define CONFIG_HWUPLOAD_FILTER 0 ++#define CONFIG_HWUPLOAD_CUDA_FILTER 0 ++#define 
CONFIG_HYSTERESIS_FILTER 0 ++#define CONFIG_ICCDETECT_FILTER 0 ++#define CONFIG_ICCGEN_FILTER 0 ++#define CONFIG_IDENTITY_FILTER 0 ++#define CONFIG_IDET_FILTER 0 ++#define CONFIG_IL_FILTER 0 ++#define CONFIG_INFLATE_FILTER 0 ++#define CONFIG_INTERLACE_FILTER 0 ++#define CONFIG_INTERLEAVE_FILTER 0 ++#define CONFIG_KERNDEINT_FILTER 0 ++#define CONFIG_KIRSCH_FILTER 0 ++#define CONFIG_LAGFUN_FILTER 0 ++#define CONFIG_LATENCY_FILTER 0 ++#define CONFIG_LENSCORRECTION_FILTER 0 ++#define CONFIG_LENSFUN_FILTER 0 ++#define CONFIG_LIBPLACEBO_FILTER 0 ++#define CONFIG_LIBVMAF_FILTER 0 ++#define CONFIG_LIBVMAF_CUDA_FILTER 0 ++#define CONFIG_LIMITDIFF_FILTER 0 ++#define CONFIG_LIMITER_FILTER 0 ++#define CONFIG_LOOP_FILTER 0 ++#define CONFIG_LUMAKEY_FILTER 0 ++#define CONFIG_LUT_FILTER 0 ++#define CONFIG_LUT1D_FILTER 0 ++#define CONFIG_LUT2_FILTER 0 ++#define CONFIG_LUT3D_FILTER 0 ++#define CONFIG_LUTRGB_FILTER 0 ++#define CONFIG_LUTYUV_FILTER 0 ++#define CONFIG_MASKEDCLAMP_FILTER 0 ++#define CONFIG_MASKEDMAX_FILTER 0 ++#define CONFIG_MASKEDMERGE_FILTER 0 ++#define CONFIG_MASKEDMIN_FILTER 0 ++#define CONFIG_MASKEDTHRESHOLD_FILTER 0 ++#define CONFIG_MASKFUN_FILTER 0 ++#define CONFIG_MCDEINT_FILTER 0 ++#define CONFIG_MEDIAN_FILTER 0 ++#define CONFIG_MERGEPLANES_FILTER 0 ++#define CONFIG_MESTIMATE_FILTER 0 ++#define CONFIG_METADATA_FILTER 0 ++#define CONFIG_MIDEQUALIZER_FILTER 0 ++#define CONFIG_MINTERPOLATE_FILTER 0 ++#define CONFIG_MIX_FILTER 0 ++#define CONFIG_MONOCHROME_FILTER 0 ++#define CONFIG_MORPHO_FILTER 0 ++#define CONFIG_MPDECIMATE_FILTER 0 ++#define CONFIG_MSAD_FILTER 0 ++#define CONFIG_MULTIPLY_FILTER 0 ++#define CONFIG_NEGATE_FILTER 0 ++#define CONFIG_NLMEANS_FILTER 0 ++#define CONFIG_NLMEANS_OPENCL_FILTER 0 ++#define CONFIG_NLMEANS_VULKAN_FILTER 0 ++#define CONFIG_NNEDI_FILTER 0 ++#define CONFIG_NOFORMAT_FILTER 0 ++#define CONFIG_NOISE_FILTER 0 ++#define CONFIG_NORMALIZE_FILTER 0 ++#define CONFIG_NULL_FILTER 0 ++#define CONFIG_OCR_FILTER 0 ++#define CONFIG_OCV_FILTER 
0 ++#define CONFIG_OSCILLOSCOPE_FILTER 0 ++#define CONFIG_OVERLAY_FILTER 0 ++#define CONFIG_OVERLAY_OPENCL_FILTER 0 ++#define CONFIG_OVERLAY_QSV_FILTER 0 ++#define CONFIG_OVERLAY_VAAPI_FILTER 0 ++#define CONFIG_OVERLAY_VULKAN_FILTER 0 ++#define CONFIG_OVERLAY_CUDA_FILTER 0 ++#define CONFIG_OWDENOISE_FILTER 0 ++#define CONFIG_PAD_FILTER 0 ++#define CONFIG_PAD_OPENCL_FILTER 0 ++#define CONFIG_PALETTEGEN_FILTER 0 ++#define CONFIG_PALETTEUSE_FILTER 0 ++#define CONFIG_PERMS_FILTER 0 ++#define CONFIG_PERSPECTIVE_FILTER 0 ++#define CONFIG_PHASE_FILTER 0 ++#define CONFIG_PHOTOSENSITIVITY_FILTER 0 ++#define CONFIG_PIXDESCTEST_FILTER 0 ++#define CONFIG_PIXELIZE_FILTER 0 ++#define CONFIG_PIXSCOPE_FILTER 0 ++#define CONFIG_PP_FILTER 0 ++#define CONFIG_PP7_FILTER 0 ++#define CONFIG_PREMULTIPLY_FILTER 0 ++#define CONFIG_PREWITT_FILTER 0 ++#define CONFIG_PREWITT_OPENCL_FILTER 0 ++#define CONFIG_PROCAMP_VAAPI_FILTER 0 ++#define CONFIG_PROGRAM_OPENCL_FILTER 0 ++#define CONFIG_PSEUDOCOLOR_FILTER 0 ++#define CONFIG_PSNR_FILTER 0 ++#define CONFIG_PULLUP_FILTER 0 ++#define CONFIG_QP_FILTER 0 ++#define CONFIG_RANDOM_FILTER 0 ++#define CONFIG_READEIA608_FILTER 0 ++#define CONFIG_READVITC_FILTER 0 ++#define CONFIG_REALTIME_FILTER 0 ++#define CONFIG_REMAP_FILTER 0 ++#define CONFIG_REMAP_OPENCL_FILTER 0 ++#define CONFIG_REMOVEGRAIN_FILTER 0 ++#define CONFIG_REMOVELOGO_FILTER 0 ++#define CONFIG_REPEATFIELDS_FILTER 0 ++#define CONFIG_REVERSE_FILTER 0 ++#define CONFIG_RGBASHIFT_FILTER 0 ++#define CONFIG_ROBERTS_FILTER 0 ++#define CONFIG_ROBERTS_OPENCL_FILTER 0 ++#define CONFIG_ROTATE_FILTER 0 ++#define CONFIG_SAB_FILTER 0 ++#define CONFIG_SCALE_FILTER 0 ++#define CONFIG_SCALE_CUDA_FILTER 0 ++#define CONFIG_SCALE_NPP_FILTER 0 ++#define CONFIG_SCALE_QSV_FILTER 0 ++#define CONFIG_SCALE_VAAPI_FILTER 0 ++#define CONFIG_SCALE_VT_FILTER 0 ++#define CONFIG_SCALE_VULKAN_FILTER 0 ++#define CONFIG_SCALE2REF_FILTER 0 ++#define CONFIG_SCALE2REF_NPP_FILTER 0 ++#define CONFIG_SCDET_FILTER 0 ++#define 
CONFIG_SCHARR_FILTER 0 ++#define CONFIG_SCROLL_FILTER 0 ++#define CONFIG_SEGMENT_FILTER 0 ++#define CONFIG_SELECT_FILTER 0 ++#define CONFIG_SELECTIVECOLOR_FILTER 0 ++#define CONFIG_SENDCMD_FILTER 0 ++#define CONFIG_SEPARATEFIELDS_FILTER 0 ++#define CONFIG_SETDAR_FILTER 0 ++#define CONFIG_SETFIELD_FILTER 0 ++#define CONFIG_SETPARAMS_FILTER 0 ++#define CONFIG_SETPTS_FILTER 0 ++#define CONFIG_SETRANGE_FILTER 0 ++#define CONFIG_SETSAR_FILTER 0 ++#define CONFIG_SETTB_FILTER 0 ++#define CONFIG_SHARPEN_NPP_FILTER 0 ++#define CONFIG_SHARPNESS_VAAPI_FILTER 0 ++#define CONFIG_SHEAR_FILTER 0 ++#define CONFIG_SHOWINFO_FILTER 0 ++#define CONFIG_SHOWPALETTE_FILTER 0 ++#define CONFIG_SHUFFLEFRAMES_FILTER 0 ++#define CONFIG_SHUFFLEPIXELS_FILTER 0 ++#define CONFIG_SHUFFLEPLANES_FILTER 0 ++#define CONFIG_SIDEDATA_FILTER 0 ++#define CONFIG_SIGNALSTATS_FILTER 0 ++#define CONFIG_SIGNATURE_FILTER 0 ++#define CONFIG_SITI_FILTER 0 ++#define CONFIG_SMARTBLUR_FILTER 0 ++#define CONFIG_SOBEL_FILTER 0 ++#define CONFIG_SOBEL_OPENCL_FILTER 0 ++#define CONFIG_SPLIT_FILTER 0 ++#define CONFIG_SPP_FILTER 0 ++#define CONFIG_SR_FILTER 0 ++#define CONFIG_SSIM_FILTER 0 ++#define CONFIG_SSIM360_FILTER 0 ++#define CONFIG_STEREO3D_FILTER 0 ++#define CONFIG_STREAMSELECT_FILTER 0 ++#define CONFIG_SUBTITLES_FILTER 0 ++#define CONFIG_SUPER2XSAI_FILTER 0 ++#define CONFIG_SWAPRECT_FILTER 0 ++#define CONFIG_SWAPUV_FILTER 0 ++#define CONFIG_TBLEND_FILTER 0 ++#define CONFIG_TELECINE_FILTER 0 ++#define CONFIG_THISTOGRAM_FILTER 0 ++#define CONFIG_THRESHOLD_FILTER 0 ++#define CONFIG_THUMBNAIL_FILTER 0 ++#define CONFIG_THUMBNAIL_CUDA_FILTER 0 ++#define CONFIG_TILE_FILTER 0 ++#define CONFIG_TINTERLACE_FILTER 0 ++#define CONFIG_TLUT2_FILTER 0 ++#define CONFIG_TMEDIAN_FILTER 0 ++#define CONFIG_TMIDEQUALIZER_FILTER 0 ++#define CONFIG_TMIX_FILTER 0 ++#define CONFIG_TONEMAP_FILTER 0 ++#define CONFIG_TONEMAP_OPENCL_FILTER 0 ++#define CONFIG_TONEMAP_VAAPI_FILTER 0 ++#define CONFIG_TPAD_FILTER 0 ++#define 
CONFIG_TRANSPOSE_FILTER 0 ++#define CONFIG_TRANSPOSE_NPP_FILTER 0 ++#define CONFIG_TRANSPOSE_OPENCL_FILTER 0 ++#define CONFIG_TRANSPOSE_VAAPI_FILTER 0 ++#define CONFIG_TRANSPOSE_VT_FILTER 0 ++#define CONFIG_TRANSPOSE_VULKAN_FILTER 0 ++#define CONFIG_TRIM_FILTER 0 ++#define CONFIG_UNPREMULTIPLY_FILTER 0 ++#define CONFIG_UNSHARP_FILTER 0 ++#define CONFIG_UNSHARP_OPENCL_FILTER 0 ++#define CONFIG_UNTILE_FILTER 0 ++#define CONFIG_USPP_FILTER 0 ++#define CONFIG_V360_FILTER 0 ++#define CONFIG_VAGUEDENOISER_FILTER 0 ++#define CONFIG_VARBLUR_FILTER 0 ++#define CONFIG_VECTORSCOPE_FILTER 0 ++#define CONFIG_VFLIP_FILTER 0 ++#define CONFIG_VFLIP_VULKAN_FILTER 0 ++#define CONFIG_VFRDET_FILTER 0 ++#define CONFIG_VIBRANCE_FILTER 0 ++#define CONFIG_VIDSTABDETECT_FILTER 0 ++#define CONFIG_VIDSTABTRANSFORM_FILTER 0 ++#define CONFIG_VIF_FILTER 0 ++#define CONFIG_VIGNETTE_FILTER 0 ++#define CONFIG_VMAFMOTION_FILTER 0 ++#define CONFIG_VPP_QSV_FILTER 0 ++#define CONFIG_VSTACK_FILTER 0 ++#define CONFIG_W3FDIF_FILTER 0 ++#define CONFIG_WAVEFORM_FILTER 0 ++#define CONFIG_WEAVE_FILTER 0 ++#define CONFIG_XBR_FILTER 0 ++#define CONFIG_XCORRELATE_FILTER 0 ++#define CONFIG_XFADE_FILTER 0 ++#define CONFIG_XFADE_OPENCL_FILTER 0 ++#define CONFIG_XFADE_VULKAN_FILTER 0 ++#define CONFIG_XMEDIAN_FILTER 0 ++#define CONFIG_XSTACK_FILTER 0 ++#define CONFIG_YADIF_FILTER 0 ++#define CONFIG_YADIF_CUDA_FILTER 0 ++#define CONFIG_YADIF_VIDEOTOOLBOX_FILTER 0 ++#define CONFIG_YAEPBLUR_FILTER 0 ++#define CONFIG_ZMQ_FILTER 0 ++#define CONFIG_ZOOMPAN_FILTER 0 ++#define CONFIG_ZSCALE_FILTER 0 ++#define CONFIG_HSTACK_VAAPI_FILTER 0 ++#define CONFIG_VSTACK_VAAPI_FILTER 0 ++#define CONFIG_XSTACK_VAAPI_FILTER 0 ++#define CONFIG_HSTACK_QSV_FILTER 0 ++#define CONFIG_VSTACK_QSV_FILTER 0 ++#define CONFIG_XSTACK_QSV_FILTER 0 ++#define CONFIG_ALLRGB_FILTER 0 ++#define CONFIG_ALLYUV_FILTER 0 ++#define CONFIG_CELLAUTO_FILTER 0 ++#define CONFIG_COLOR_FILTER 0 ++#define CONFIG_COLOR_VULKAN_FILTER 0 ++#define 
CONFIG_COLORCHART_FILTER 0 ++#define CONFIG_COLORSPECTRUM_FILTER 0 ++#define CONFIG_COREIMAGESRC_FILTER 0 ++#define CONFIG_DDAGRAB_FILTER 0 ++#define CONFIG_FREI0R_SRC_FILTER 0 ++#define CONFIG_GRADIENTS_FILTER 0 ++#define CONFIG_HALDCLUTSRC_FILTER 0 ++#define CONFIG_LIFE_FILTER 0 ++#define CONFIG_MANDELBROT_FILTER 0 ++#define CONFIG_MPTESTSRC_FILTER 0 ++#define CONFIG_NULLSRC_FILTER 0 ++#define CONFIG_OPENCLSRC_FILTER 0 ++#define CONFIG_PAL75BARS_FILTER 0 ++#define CONFIG_PAL100BARS_FILTER 0 ++#define CONFIG_RGBTESTSRC_FILTER 0 ++#define CONFIG_SIERPINSKI_FILTER 0 ++#define CONFIG_SMPTEBARS_FILTER 0 ++#define CONFIG_SMPTEHDBARS_FILTER 0 ++#define CONFIG_TESTSRC_FILTER 0 ++#define CONFIG_TESTSRC2_FILTER 0 ++#define CONFIG_YUVTESTSRC_FILTER 0 ++#define CONFIG_ZONEPLATE_FILTER 0 ++#define CONFIG_NULLSINK_FILTER 0 ++#define CONFIG_A3DSCOPE_FILTER 0 ++#define CONFIG_ABITSCOPE_FILTER 0 ++#define CONFIG_ADRAWGRAPH_FILTER 0 ++#define CONFIG_AGRAPHMONITOR_FILTER 0 ++#define CONFIG_AHISTOGRAM_FILTER 0 ++#define CONFIG_APHASEMETER_FILTER 0 ++#define CONFIG_AVECTORSCOPE_FILTER 0 ++#define CONFIG_CONCAT_FILTER 0 ++#define CONFIG_SHOWCQT_FILTER 0 ++#define CONFIG_SHOWCWT_FILTER 0 ++#define CONFIG_SHOWFREQS_FILTER 0 ++#define CONFIG_SHOWSPATIAL_FILTER 0 ++#define CONFIG_SHOWSPECTRUM_FILTER 0 ++#define CONFIG_SHOWSPECTRUMPIC_FILTER 0 ++#define CONFIG_SHOWVOLUME_FILTER 0 ++#define CONFIG_SHOWWAVES_FILTER 0 ++#define CONFIG_SHOWWAVESPIC_FILTER 0 ++#define CONFIG_SPECTRUMSYNTH_FILTER 0 ++#define CONFIG_AVSYNCTEST_FILTER 0 ++#define CONFIG_AMOVIE_FILTER 0 ++#define CONFIG_MOVIE_FILTER 0 ++#define CONFIG_AFIFO_FILTER 0 ++#define CONFIG_FIFO_FILTER 0 ++#define CONFIG_AA_DEMUXER 0 ++#define CONFIG_AAC_DEMUXER 1 ++#define CONFIG_AAX_DEMUXER 0 ++#define CONFIG_AC3_DEMUXER 0 ++#define CONFIG_AC4_DEMUXER 0 ++#define CONFIG_ACE_DEMUXER 0 ++#define CONFIG_ACM_DEMUXER 0 ++#define CONFIG_ACT_DEMUXER 0 ++#define CONFIG_ADF_DEMUXER 0 ++#define CONFIG_ADP_DEMUXER 0 ++#define CONFIG_ADS_DEMUXER 0 
++#define CONFIG_ADX_DEMUXER 0 ++#define CONFIG_AEA_DEMUXER 0 ++#define CONFIG_AFC_DEMUXER 0 ++#define CONFIG_AIFF_DEMUXER 0 ++#define CONFIG_AIX_DEMUXER 0 ++#define CONFIG_ALP_DEMUXER 0 ++#define CONFIG_AMR_DEMUXER 0 ++#define CONFIG_AMRNB_DEMUXER 0 ++#define CONFIG_AMRWB_DEMUXER 0 ++#define CONFIG_ANM_DEMUXER 0 ++#define CONFIG_APAC_DEMUXER 0 ++#define CONFIG_APC_DEMUXER 0 ++#define CONFIG_APE_DEMUXER 0 ++#define CONFIG_APM_DEMUXER 0 ++#define CONFIG_APNG_DEMUXER 0 ++#define CONFIG_APTX_DEMUXER 0 ++#define CONFIG_APTX_HD_DEMUXER 0 ++#define CONFIG_AQTITLE_DEMUXER 0 ++#define CONFIG_ARGO_ASF_DEMUXER 0 ++#define CONFIG_ARGO_BRP_DEMUXER 0 ++#define CONFIG_ARGO_CVG_DEMUXER 0 ++#define CONFIG_ASF_DEMUXER 0 ++#define CONFIG_ASF_O_DEMUXER 0 ++#define CONFIG_ASS_DEMUXER 0 ++#define CONFIG_AST_DEMUXER 0 ++#define CONFIG_AU_DEMUXER 0 ++#define CONFIG_AV1_DEMUXER 0 ++#define CONFIG_AVI_DEMUXER 1 ++#define CONFIG_AVISYNTH_DEMUXER 0 ++#define CONFIG_AVR_DEMUXER 0 ++#define CONFIG_AVS_DEMUXER 0 ++#define CONFIG_AVS2_DEMUXER 0 ++#define CONFIG_AVS3_DEMUXER 0 ++#define CONFIG_BETHSOFTVID_DEMUXER 0 ++#define CONFIG_BFI_DEMUXER 0 ++#define CONFIG_BINTEXT_DEMUXER 0 ++#define CONFIG_BINK_DEMUXER 0 ++#define CONFIG_BINKA_DEMUXER 0 ++#define CONFIG_BIT_DEMUXER 0 ++#define CONFIG_BITPACKED_DEMUXER 0 ++#define CONFIG_BMV_DEMUXER 0 ++#define CONFIG_BFSTM_DEMUXER 0 ++#define CONFIG_BRSTM_DEMUXER 0 ++#define CONFIG_BOA_DEMUXER 0 ++#define CONFIG_BONK_DEMUXER 0 ++#define CONFIG_C93_DEMUXER 0 ++#define CONFIG_CAF_DEMUXER 0 ++#define CONFIG_CAVSVIDEO_DEMUXER 0 ++#define CONFIG_CDG_DEMUXER 0 ++#define CONFIG_CDXL_DEMUXER 0 ++#define CONFIG_CINE_DEMUXER 0 ++#define CONFIG_CODEC2_DEMUXER 0 ++#define CONFIG_CODEC2RAW_DEMUXER 0 ++#define CONFIG_CONCAT_DEMUXER 0 ++#define CONFIG_DASH_DEMUXER 0 ++#define CONFIG_DATA_DEMUXER 0 ++#define CONFIG_DAUD_DEMUXER 0 ++#define CONFIG_DCSTR_DEMUXER 0 ++#define CONFIG_DERF_DEMUXER 0 ++#define CONFIG_DFA_DEMUXER 0 ++#define CONFIG_DFPWM_DEMUXER 0 ++#define 
CONFIG_DHAV_DEMUXER 0 ++#define CONFIG_DIRAC_DEMUXER 0 ++#define CONFIG_DNXHD_DEMUXER 0 ++#define CONFIG_DSF_DEMUXER 0 ++#define CONFIG_DSICIN_DEMUXER 0 ++#define CONFIG_DSS_DEMUXER 0 ++#define CONFIG_DTS_DEMUXER 0 ++#define CONFIG_DTSHD_DEMUXER 0 ++#define CONFIG_DV_DEMUXER 0 ++#define CONFIG_DVBSUB_DEMUXER 0 ++#define CONFIG_DVBTXT_DEMUXER 0 ++#define CONFIG_DXA_DEMUXER 0 ++#define CONFIG_EA_DEMUXER 0 ++#define CONFIG_EA_CDATA_DEMUXER 0 ++#define CONFIG_EAC3_DEMUXER 0 ++#define CONFIG_EPAF_DEMUXER 0 ++#define CONFIG_EVC_DEMUXER 0 ++#define CONFIG_FFMETADATA_DEMUXER 0 ++#define CONFIG_FILMSTRIP_DEMUXER 0 ++#define CONFIG_FITS_DEMUXER 0 ++#define CONFIG_FLAC_DEMUXER 1 ++#define CONFIG_FLIC_DEMUXER 0 ++#define CONFIG_FLV_DEMUXER 0 ++#define CONFIG_LIVE_FLV_DEMUXER 0 ++#define CONFIG_FOURXM_DEMUXER 0 ++#define CONFIG_FRM_DEMUXER 0 ++#define CONFIG_FSB_DEMUXER 0 ++#define CONFIG_FWSE_DEMUXER 0 ++#define CONFIG_G722_DEMUXER 0 ++#define CONFIG_G723_1_DEMUXER 0 ++#define CONFIG_G726_DEMUXER 0 ++#define CONFIG_G726LE_DEMUXER 0 ++#define CONFIG_G729_DEMUXER 0 ++#define CONFIG_GDV_DEMUXER 0 ++#define CONFIG_GENH_DEMUXER 0 ++#define CONFIG_GIF_DEMUXER 0 ++#define CONFIG_GSM_DEMUXER 0 ++#define CONFIG_GXF_DEMUXER 0 ++#define CONFIG_H261_DEMUXER 0 ++#define CONFIG_H263_DEMUXER 0 ++#define CONFIG_H264_DEMUXER 0 ++#define CONFIG_HCA_DEMUXER 0 ++#define CONFIG_HCOM_DEMUXER 0 ++#define CONFIG_HEVC_DEMUXER 0 ++#define CONFIG_HLS_DEMUXER 0 ++#define CONFIG_HNM_DEMUXER 0 ++#define CONFIG_ICO_DEMUXER 0 ++#define CONFIG_IDCIN_DEMUXER 0 ++#define CONFIG_IDF_DEMUXER 0 ++#define CONFIG_IFF_DEMUXER 0 ++#define CONFIG_IFV_DEMUXER 0 ++#define CONFIG_ILBC_DEMUXER 0 ++#define CONFIG_IMAGE2_DEMUXER 0 ++#define CONFIG_IMAGE2PIPE_DEMUXER 0 ++#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 0 ++#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0 ++#define CONFIG_IMF_DEMUXER 0 ++#define CONFIG_INGENIENT_DEMUXER 0 ++#define CONFIG_IPMOVIE_DEMUXER 0 ++#define CONFIG_IPU_DEMUXER 0 ++#define CONFIG_IRCAM_DEMUXER 0 
++#define CONFIG_ISS_DEMUXER 0 ++#define CONFIG_IV8_DEMUXER 0 ++#define CONFIG_IVF_DEMUXER 0 ++#define CONFIG_IVR_DEMUXER 0 ++#define CONFIG_JACOSUB_DEMUXER 0 ++#define CONFIG_JV_DEMUXER 0 ++#define CONFIG_JPEGXL_ANIM_DEMUXER 0 ++#define CONFIG_KUX_DEMUXER 0 ++#define CONFIG_KVAG_DEMUXER 0 ++#define CONFIG_LAF_DEMUXER 0 ++#define CONFIG_LMLM4_DEMUXER 0 ++#define CONFIG_LOAS_DEMUXER 0 ++#define CONFIG_LUODAT_DEMUXER 0 ++#define CONFIG_LRC_DEMUXER 0 ++#define CONFIG_LVF_DEMUXER 0 ++#define CONFIG_LXF_DEMUXER 0 ++#define CONFIG_M4V_DEMUXER 0 ++#define CONFIG_MCA_DEMUXER 0 ++#define CONFIG_MCC_DEMUXER 0 ++#define CONFIG_MATROSKA_DEMUXER 1 ++#define CONFIG_MGSTS_DEMUXER 0 ++#define CONFIG_MICRODVD_DEMUXER 0 ++#define CONFIG_MJPEG_DEMUXER 0 ++#define CONFIG_MJPEG_2000_DEMUXER 0 ++#define CONFIG_MLP_DEMUXER 0 ++#define CONFIG_MLV_DEMUXER 0 ++#define CONFIG_MM_DEMUXER 0 ++#define CONFIG_MMF_DEMUXER 0 ++#define CONFIG_MODS_DEMUXER 0 ++#define CONFIG_MOFLEX_DEMUXER 0 ++#define CONFIG_MOV_DEMUXER 1 ++#define CONFIG_MP3_DEMUXER 1 ++#define CONFIG_MPC_DEMUXER 0 ++#define CONFIG_MPC8_DEMUXER 0 ++#define CONFIG_MPEGPS_DEMUXER 0 ++#define CONFIG_MPEGTS_DEMUXER 0 ++#define CONFIG_MPEGTSRAW_DEMUXER 0 ++#define CONFIG_MPEGVIDEO_DEMUXER 0 ++#define CONFIG_MPJPEG_DEMUXER 0 ++#define CONFIG_MPL2_DEMUXER 0 ++#define CONFIG_MPSUB_DEMUXER 0 ++#define CONFIG_MSF_DEMUXER 0 ++#define CONFIG_MSNWC_TCP_DEMUXER 0 ++#define CONFIG_MSP_DEMUXER 0 ++#define CONFIG_MTAF_DEMUXER 0 ++#define CONFIG_MTV_DEMUXER 0 ++#define CONFIG_MUSX_DEMUXER 0 ++#define CONFIG_MV_DEMUXER 0 ++#define CONFIG_MVI_DEMUXER 0 ++#define CONFIG_MXF_DEMUXER 0 ++#define CONFIG_MXG_DEMUXER 0 ++#define CONFIG_NC_DEMUXER 0 ++#define CONFIG_NISTSPHERE_DEMUXER 0 ++#define CONFIG_NSP_DEMUXER 0 ++#define CONFIG_NSV_DEMUXER 0 ++#define CONFIG_NUT_DEMUXER 0 ++#define CONFIG_NUV_DEMUXER 0 ++#define CONFIG_OBU_DEMUXER 0 ++#define CONFIG_OGG_DEMUXER 1 ++#define CONFIG_OMA_DEMUXER 0 ++#define CONFIG_OSQ_DEMUXER 0 ++#define CONFIG_PAF_DEMUXER 
0 ++#define CONFIG_PCM_ALAW_DEMUXER 0 ++#define CONFIG_PCM_MULAW_DEMUXER 0 ++#define CONFIG_PCM_VIDC_DEMUXER 0 ++#define CONFIG_PCM_F64BE_DEMUXER 0 ++#define CONFIG_PCM_F64LE_DEMUXER 0 ++#define CONFIG_PCM_F32BE_DEMUXER 0 ++#define CONFIG_PCM_F32LE_DEMUXER 0 ++#define CONFIG_PCM_S32BE_DEMUXER 0 ++#define CONFIG_PCM_S32LE_DEMUXER 0 ++#define CONFIG_PCM_S24BE_DEMUXER 0 ++#define CONFIG_PCM_S24LE_DEMUXER 0 ++#define CONFIG_PCM_S16BE_DEMUXER 0 ++#define CONFIG_PCM_S16LE_DEMUXER 0 ++#define CONFIG_PCM_S8_DEMUXER 0 ++#define CONFIG_PCM_U32BE_DEMUXER 0 ++#define CONFIG_PCM_U32LE_DEMUXER 0 ++#define CONFIG_PCM_U24BE_DEMUXER 0 ++#define CONFIG_PCM_U24LE_DEMUXER 0 ++#define CONFIG_PCM_U16BE_DEMUXER 0 ++#define CONFIG_PCM_U16LE_DEMUXER 0 ++#define CONFIG_PCM_U8_DEMUXER 0 ++#define CONFIG_PDV_DEMUXER 0 ++#define CONFIG_PJS_DEMUXER 0 ++#define CONFIG_PMP_DEMUXER 0 ++#define CONFIG_PP_BNK_DEMUXER 0 ++#define CONFIG_PVA_DEMUXER 0 ++#define CONFIG_PVF_DEMUXER 0 ++#define CONFIG_QCP_DEMUXER 0 ++#define CONFIG_R3D_DEMUXER 0 ++#define CONFIG_RAWVIDEO_DEMUXER 0 ++#define CONFIG_REALTEXT_DEMUXER 0 ++#define CONFIG_REDSPARK_DEMUXER 0 ++#define CONFIG_RKA_DEMUXER 0 ++#define CONFIG_RL2_DEMUXER 0 ++#define CONFIG_RM_DEMUXER 0 ++#define CONFIG_ROQ_DEMUXER 0 ++#define CONFIG_RPL_DEMUXER 0 ++#define CONFIG_RSD_DEMUXER 0 ++#define CONFIG_RSO_DEMUXER 0 ++#define CONFIG_RTP_DEMUXER 0 ++#define CONFIG_RTSP_DEMUXER 0 ++#define CONFIG_S337M_DEMUXER 0 ++#define CONFIG_SAMI_DEMUXER 0 ++#define CONFIG_SAP_DEMUXER 0 ++#define CONFIG_SBC_DEMUXER 0 ++#define CONFIG_SBG_DEMUXER 0 ++#define CONFIG_SCC_DEMUXER 0 ++#define CONFIG_SCD_DEMUXER 0 ++#define CONFIG_SDNS_DEMUXER 0 ++#define CONFIG_SDP_DEMUXER 0 ++#define CONFIG_SDR2_DEMUXER 0 ++#define CONFIG_SDS_DEMUXER 0 ++#define CONFIG_SDX_DEMUXER 0 ++#define CONFIG_SEGAFILM_DEMUXER 0 ++#define CONFIG_SER_DEMUXER 0 ++#define CONFIG_SGA_DEMUXER 0 ++#define CONFIG_SHORTEN_DEMUXER 0 ++#define CONFIG_SIFF_DEMUXER 0 ++#define CONFIG_SIMBIOSIS_IMX_DEMUXER 0 
++#define CONFIG_SLN_DEMUXER 0 ++#define CONFIG_SMACKER_DEMUXER 0 ++#define CONFIG_SMJPEG_DEMUXER 0 ++#define CONFIG_SMUSH_DEMUXER 0 ++#define CONFIG_SOL_DEMUXER 0 ++#define CONFIG_SOX_DEMUXER 0 ++#define CONFIG_SPDIF_DEMUXER 0 ++#define CONFIG_SRT_DEMUXER 0 ++#define CONFIG_STR_DEMUXER 0 ++#define CONFIG_STL_DEMUXER 0 ++#define CONFIG_SUBVIEWER1_DEMUXER 0 ++#define CONFIG_SUBVIEWER_DEMUXER 0 ++#define CONFIG_SUP_DEMUXER 0 ++#define CONFIG_SVAG_DEMUXER 0 ++#define CONFIG_SVS_DEMUXER 0 ++#define CONFIG_SWF_DEMUXER 0 ++#define CONFIG_TAK_DEMUXER 0 ++#define CONFIG_TEDCAPTIONS_DEMUXER 0 ++#define CONFIG_THP_DEMUXER 0 ++#define CONFIG_THREEDOSTR_DEMUXER 0 ++#define CONFIG_TIERTEXSEQ_DEMUXER 0 ++#define CONFIG_TMV_DEMUXER 0 ++#define CONFIG_TRUEHD_DEMUXER 0 ++#define CONFIG_TTA_DEMUXER 0 ++#define CONFIG_TXD_DEMUXER 0 ++#define CONFIG_TTY_DEMUXER 0 ++#define CONFIG_TY_DEMUXER 0 ++#define CONFIG_USM_DEMUXER 0 ++#define CONFIG_V210_DEMUXER 0 ++#define CONFIG_V210X_DEMUXER 0 ++#define CONFIG_VAG_DEMUXER 0 ++#define CONFIG_VC1_DEMUXER 0 ++#define CONFIG_VC1T_DEMUXER 0 ++#define CONFIG_VIVIDAS_DEMUXER 0 ++#define CONFIG_VIVO_DEMUXER 0 ++#define CONFIG_VMD_DEMUXER 0 ++#define CONFIG_VOBSUB_DEMUXER 0 ++#define CONFIG_VOC_DEMUXER 0 ++#define CONFIG_VPK_DEMUXER 0 ++#define CONFIG_VPLAYER_DEMUXER 0 ++#define CONFIG_VQF_DEMUXER 0 ++#define CONFIG_VVC_DEMUXER 0 ++#define CONFIG_W64_DEMUXER 0 ++#define CONFIG_WADY_DEMUXER 0 ++#define CONFIG_WAVARC_DEMUXER 0 ++#define CONFIG_WAV_DEMUXER 1 ++#define CONFIG_WC3_DEMUXER 0 ++#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0 ++#define CONFIG_WEBVTT_DEMUXER 0 ++#define CONFIG_WSAUD_DEMUXER 0 ++#define CONFIG_WSD_DEMUXER 0 ++#define CONFIG_WSVQA_DEMUXER 0 ++#define CONFIG_WTV_DEMUXER 0 ++#define CONFIG_WVE_DEMUXER 0 ++#define CONFIG_WV_DEMUXER 0 ++#define CONFIG_XA_DEMUXER 0 ++#define CONFIG_XBIN_DEMUXER 0 ++#define CONFIG_XMD_DEMUXER 0 ++#define CONFIG_XMV_DEMUXER 0 ++#define CONFIG_XVAG_DEMUXER 0 ++#define CONFIG_XWMA_DEMUXER 0 ++#define 
CONFIG_YOP_DEMUXER 0 ++#define CONFIG_YUV4MPEGPIPE_DEMUXER 0 ++#define CONFIG_IMAGE_BMP_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_CRI_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_DDS_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_GEM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_GIF_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_HDR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEGXL_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PFM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PHM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PHOTOCD_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PPM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PSD_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_QDRAW_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_QOI_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SVG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_VBN_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XBM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XPM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XWD_PIPE_DEMUXER 0 ++#define CONFIG_LIBGME_DEMUXER 0 ++#define CONFIG_LIBMODPLUG_DEMUXER 0 ++#define CONFIG_LIBOPENMPT_DEMUXER 0 ++#define CONFIG_VAPOURSYNTH_DEMUXER 0 ++#define CONFIG_A64_MUXER 0 ++#define CONFIG_AC3_MUXER 0 ++#define CONFIG_AC4_MUXER 0 ++#define CONFIG_ADTS_MUXER 0 ++#define CONFIG_ADX_MUXER 0 ++#define CONFIG_AIFF_MUXER 0 ++#define CONFIG_ALP_MUXER 0 ++#define CONFIG_AMR_MUXER 0 ++#define 
CONFIG_AMV_MUXER 0 ++#define CONFIG_APM_MUXER 0 ++#define CONFIG_APNG_MUXER 0 ++#define CONFIG_APTX_MUXER 0 ++#define CONFIG_APTX_HD_MUXER 0 ++#define CONFIG_ARGO_ASF_MUXER 0 ++#define CONFIG_ARGO_CVG_MUXER 0 ++#define CONFIG_ASF_MUXER 0 ++#define CONFIG_ASS_MUXER 0 ++#define CONFIG_AST_MUXER 0 ++#define CONFIG_ASF_STREAM_MUXER 0 ++#define CONFIG_AU_MUXER 0 ++#define CONFIG_AVI_MUXER 0 ++#define CONFIG_AVIF_MUXER 0 ++#define CONFIG_AVM2_MUXER 0 ++#define CONFIG_AVS2_MUXER 0 ++#define CONFIG_AVS3_MUXER 0 ++#define CONFIG_BIT_MUXER 0 ++#define CONFIG_CAF_MUXER 0 ++#define CONFIG_CAVSVIDEO_MUXER 0 ++#define CONFIG_CODEC2_MUXER 0 ++#define CONFIG_CODEC2RAW_MUXER 0 ++#define CONFIG_CRC_MUXER 0 ++#define CONFIG_DASH_MUXER 0 ++#define CONFIG_DATA_MUXER 0 ++#define CONFIG_DAUD_MUXER 0 ++#define CONFIG_DFPWM_MUXER 0 ++#define CONFIG_DIRAC_MUXER 0 ++#define CONFIG_DNXHD_MUXER 0 ++#define CONFIG_DTS_MUXER 0 ++#define CONFIG_DV_MUXER 0 ++#define CONFIG_EAC3_MUXER 0 ++#define CONFIG_EVC_MUXER 0 ++#define CONFIG_F4V_MUXER 0 ++#define CONFIG_FFMETADATA_MUXER 0 ++#define CONFIG_FIFO_MUXER 0 ++#define CONFIG_FIFO_TEST_MUXER 0 ++#define CONFIG_FILMSTRIP_MUXER 0 ++#define CONFIG_FITS_MUXER 0 ++#define CONFIG_FLAC_MUXER 0 ++#define CONFIG_FLV_MUXER 0 ++#define CONFIG_FRAMECRC_MUXER 0 ++#define CONFIG_FRAMEHASH_MUXER 0 ++#define CONFIG_FRAMEMD5_MUXER 0 ++#define CONFIG_G722_MUXER 0 ++#define CONFIG_G723_1_MUXER 0 ++#define CONFIG_G726_MUXER 0 ++#define CONFIG_G726LE_MUXER 0 ++#define CONFIG_GIF_MUXER 0 ++#define CONFIG_GSM_MUXER 0 ++#define CONFIG_GXF_MUXER 0 ++#define CONFIG_H261_MUXER 0 ++#define CONFIG_H263_MUXER 0 ++#define CONFIG_H264_MUXER 0 ++#define CONFIG_HASH_MUXER 0 ++#define CONFIG_HDS_MUXER 0 ++#define CONFIG_HEVC_MUXER 0 ++#define CONFIG_HLS_MUXER 0 ++#define CONFIG_ICO_MUXER 0 ++#define CONFIG_ILBC_MUXER 0 ++#define CONFIG_IMAGE2_MUXER 0 ++#define CONFIG_IMAGE2PIPE_MUXER 0 ++#define CONFIG_IPOD_MUXER 0 ++#define CONFIG_IRCAM_MUXER 0 ++#define CONFIG_ISMV_MUXER 0 
++#define CONFIG_IVF_MUXER 0 ++#define CONFIG_JACOSUB_MUXER 0 ++#define CONFIG_KVAG_MUXER 0 ++#define CONFIG_LATM_MUXER 0 ++#define CONFIG_LRC_MUXER 0 ++#define CONFIG_M4V_MUXER 0 ++#define CONFIG_MD5_MUXER 0 ++#define CONFIG_MATROSKA_MUXER 0 ++#define CONFIG_MATROSKA_AUDIO_MUXER 0 ++#define CONFIG_MICRODVD_MUXER 0 ++#define CONFIG_MJPEG_MUXER 0 ++#define CONFIG_MLP_MUXER 0 ++#define CONFIG_MMF_MUXER 0 ++#define CONFIG_MOV_MUXER 0 ++#define CONFIG_MP2_MUXER 0 ++#define CONFIG_MP3_MUXER 0 ++#define CONFIG_MP4_MUXER 0 ++#define CONFIG_MPEG1SYSTEM_MUXER 0 ++#define CONFIG_MPEG1VCD_MUXER 0 ++#define CONFIG_MPEG1VIDEO_MUXER 0 ++#define CONFIG_MPEG2DVD_MUXER 0 ++#define CONFIG_MPEG2SVCD_MUXER 0 ++#define CONFIG_MPEG2VIDEO_MUXER 0 ++#define CONFIG_MPEG2VOB_MUXER 0 ++#define CONFIG_MPEGTS_MUXER 0 ++#define CONFIG_MPJPEG_MUXER 0 ++#define CONFIG_MXF_MUXER 0 ++#define CONFIG_MXF_D10_MUXER 0 ++#define CONFIG_MXF_OPATOM_MUXER 0 ++#define CONFIG_NULL_MUXER 0 ++#define CONFIG_NUT_MUXER 0 ++#define CONFIG_OBU_MUXER 0 ++#define CONFIG_OGA_MUXER 0 ++#define CONFIG_OGG_MUXER 0 ++#define CONFIG_OGV_MUXER 0 ++#define CONFIG_OMA_MUXER 0 ++#define CONFIG_OPUS_MUXER 0 ++#define CONFIG_PCM_ALAW_MUXER 0 ++#define CONFIG_PCM_MULAW_MUXER 0 ++#define CONFIG_PCM_VIDC_MUXER 0 ++#define CONFIG_PCM_F64BE_MUXER 0 ++#define CONFIG_PCM_F64LE_MUXER 0 ++#define CONFIG_PCM_F32BE_MUXER 0 ++#define CONFIG_PCM_F32LE_MUXER 0 ++#define CONFIG_PCM_S32BE_MUXER 0 ++#define CONFIG_PCM_S32LE_MUXER 0 ++#define CONFIG_PCM_S24BE_MUXER 0 ++#define CONFIG_PCM_S24LE_MUXER 0 ++#define CONFIG_PCM_S16BE_MUXER 0 ++#define CONFIG_PCM_S16LE_MUXER 0 ++#define CONFIG_PCM_S8_MUXER 0 ++#define CONFIG_PCM_U32BE_MUXER 0 ++#define CONFIG_PCM_U32LE_MUXER 0 ++#define CONFIG_PCM_U24BE_MUXER 0 ++#define CONFIG_PCM_U24LE_MUXER 0 ++#define CONFIG_PCM_U16BE_MUXER 0 ++#define CONFIG_PCM_U16LE_MUXER 0 ++#define CONFIG_PCM_U8_MUXER 0 ++#define CONFIG_PSP_MUXER 0 ++#define CONFIG_RAWVIDEO_MUXER 0 ++#define CONFIG_RM_MUXER 0 ++#define 
CONFIG_ROQ_MUXER 0 ++#define CONFIG_RSO_MUXER 0 ++#define CONFIG_RTP_MUXER 0 ++#define CONFIG_RTP_MPEGTS_MUXER 0 ++#define CONFIG_RTSP_MUXER 0 ++#define CONFIG_SAP_MUXER 0 ++#define CONFIG_SBC_MUXER 0 ++#define CONFIG_SCC_MUXER 0 ++#define CONFIG_SEGAFILM_MUXER 0 ++#define CONFIG_SEGMENT_MUXER 0 ++#define CONFIG_STREAM_SEGMENT_MUXER 0 ++#define CONFIG_SMJPEG_MUXER 0 ++#define CONFIG_SMOOTHSTREAMING_MUXER 0 ++#define CONFIG_SOX_MUXER 0 ++#define CONFIG_SPX_MUXER 0 ++#define CONFIG_SPDIF_MUXER 0 ++#define CONFIG_SRT_MUXER 0 ++#define CONFIG_STREAMHASH_MUXER 0 ++#define CONFIG_SUP_MUXER 0 ++#define CONFIG_SWF_MUXER 0 ++#define CONFIG_TEE_MUXER 0 ++#define CONFIG_TG2_MUXER 0 ++#define CONFIG_TGP_MUXER 0 ++#define CONFIG_MKVTIMESTAMP_V2_MUXER 0 ++#define CONFIG_TRUEHD_MUXER 0 ++#define CONFIG_TTA_MUXER 0 ++#define CONFIG_TTML_MUXER 0 ++#define CONFIG_UNCODEDFRAMECRC_MUXER 0 ++#define CONFIG_VC1_MUXER 0 ++#define CONFIG_VC1T_MUXER 0 ++#define CONFIG_VOC_MUXER 0 ++#define CONFIG_VVC_MUXER 0 ++#define CONFIG_W64_MUXER 0 ++#define CONFIG_WAV_MUXER 0 ++#define CONFIG_WEBM_MUXER 0 ++#define CONFIG_WEBM_DASH_MANIFEST_MUXER 0 ++#define CONFIG_WEBM_CHUNK_MUXER 0 ++#define CONFIG_WEBP_MUXER 0 ++#define CONFIG_WEBVTT_MUXER 0 ++#define CONFIG_WSAUD_MUXER 0 ++#define CONFIG_WTV_MUXER 0 ++#define CONFIG_WV_MUXER 0 ++#define CONFIG_YUV4MPEGPIPE_MUXER 0 ++#define CONFIG_CHROMAPRINT_MUXER 0 ++#define CONFIG_ASYNC_PROTOCOL 0 ++#define CONFIG_BLURAY_PROTOCOL 0 ++#define CONFIG_CACHE_PROTOCOL 0 ++#define CONFIG_CONCAT_PROTOCOL 0 ++#define CONFIG_CONCATF_PROTOCOL 0 ++#define CONFIG_CRYPTO_PROTOCOL 0 ++#define CONFIG_DATA_PROTOCOL 0 ++#define CONFIG_FD_PROTOCOL 0 ++#define CONFIG_FFRTMPCRYPT_PROTOCOL 0 ++#define CONFIG_FFRTMPHTTP_PROTOCOL 0 ++#define CONFIG_FILE_PROTOCOL 0 ++#define CONFIG_FTP_PROTOCOL 0 ++#define CONFIG_GOPHER_PROTOCOL 0 ++#define CONFIG_GOPHERS_PROTOCOL 0 ++#define CONFIG_HLS_PROTOCOL 0 ++#define CONFIG_HTTP_PROTOCOL 0 ++#define CONFIG_HTTPPROXY_PROTOCOL 0 ++#define 
CONFIG_HTTPS_PROTOCOL 0 ++#define CONFIG_ICECAST_PROTOCOL 0 ++#define CONFIG_MMSH_PROTOCOL 0 ++#define CONFIG_MMST_PROTOCOL 0 ++#define CONFIG_MD5_PROTOCOL 0 ++#define CONFIG_PIPE_PROTOCOL 0 ++#define CONFIG_PROMPEG_PROTOCOL 0 ++#define CONFIG_RTMP_PROTOCOL 0 ++#define CONFIG_RTMPE_PROTOCOL 0 ++#define CONFIG_RTMPS_PROTOCOL 0 ++#define CONFIG_RTMPT_PROTOCOL 0 ++#define CONFIG_RTMPTE_PROTOCOL 0 ++#define CONFIG_RTMPTS_PROTOCOL 0 ++#define CONFIG_RTP_PROTOCOL 0 ++#define CONFIG_SCTP_PROTOCOL 0 ++#define CONFIG_SRTP_PROTOCOL 0 ++#define CONFIG_SUBFILE_PROTOCOL 0 ++#define CONFIG_TEE_PROTOCOL 0 ++#define CONFIG_TCP_PROTOCOL 0 ++#define CONFIG_TLS_PROTOCOL 0 ++#define CONFIG_UDP_PROTOCOL 0 ++#define CONFIG_UDPLITE_PROTOCOL 0 ++#define CONFIG_UNIX_PROTOCOL 0 ++#define CONFIG_LIBAMQP_PROTOCOL 0 ++#define CONFIG_LIBRIST_PROTOCOL 0 ++#define CONFIG_LIBRTMP_PROTOCOL 0 ++#define CONFIG_LIBRTMPE_PROTOCOL 0 ++#define CONFIG_LIBRTMPS_PROTOCOL 0 ++#define CONFIG_LIBRTMPT_PROTOCOL 0 ++#define CONFIG_LIBRTMPTE_PROTOCOL 0 ++#define CONFIG_LIBSRT_PROTOCOL 0 ++#define CONFIG_LIBSSH_PROTOCOL 0 ++#define CONFIG_LIBSMBCLIENT_PROTOCOL 0 ++#define CONFIG_LIBZMQ_PROTOCOL 0 ++#define CONFIG_IPFS_GATEWAY_PROTOCOL 0 ++#define CONFIG_IPNS_GATEWAY_PROTOCOL 0 ++#endif /* FFMPEG_CONFIG_COMPONENTS_H */ +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavcodec/bsf_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavcodec/bsf_list.c +@@ -0,0 +1,2 @@ ++static const FFBitStreamFilter * const bitstream_filters[] = { ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavcodec/codec_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavcodec/codec_list.c +@@ -0,0 +1,22 
@@ ++static const FFCodec * const codec_list[] = { ++ &ff_h263_decoder, ++ &ff_h264_decoder, ++ &ff_mpeg4_decoder, ++ &ff_theora_decoder, ++ &ff_vp3_decoder, ++ &ff_vp8_decoder, ++ &ff_aac_decoder, ++ &ff_flac_decoder, ++ &ff_mp3_decoder, ++ &ff_vorbis_decoder, ++ &ff_pcm_alaw_decoder, ++ &ff_pcm_f32le_decoder, ++ &ff_pcm_mulaw_decoder, ++ &ff_pcm_s16be_decoder, ++ &ff_pcm_s16le_decoder, ++ &ff_pcm_s24be_decoder, ++ &ff_pcm_s24le_decoder, ++ &ff_pcm_s32le_decoder, ++ &ff_pcm_u8_decoder, ++ &ff_libopus_decoder, ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavcodec/parser_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavcodec/parser_list.c +@@ -0,0 +1,13 @@ ++static const AVCodecParser * const parser_list[] = { ++ &ff_aac_parser, ++ &ff_flac_parser, ++ &ff_h263_parser, ++ &ff_h264_parser, ++ &ff_mpeg4video_parser, ++ &ff_mpegaudio_parser, ++ &ff_opus_parser, ++ &ff_vorbis_parser, ++ &ff_vp3_parser, ++ &ff_vp8_parser, ++ &ff_vp9_parser, ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavformat/demuxer_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavformat/demuxer_list.c +@@ -0,0 +1,10 @@ ++static const AVInputFormat * const demuxer_list[] = { ++ &ff_aac_demuxer, ++ &ff_avi_demuxer, ++ &ff_flac_demuxer, ++ &ff_matroska_demuxer, ++ &ff_mov_demuxer, ++ &ff_mp3_demuxer, ++ &ff_ogg_demuxer, ++ &ff_wav_demuxer, ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavformat/muxer_list.c +=================================================================== +--- /dev/null ++++ 
chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavformat/muxer_list.c +@@ -0,0 +1,2 @@ ++static const FFOutputFormat * const muxer_list[] = { ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavformat/protocol_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavformat/protocol_list.c +@@ -0,0 +1,2 @@ ++static const URLProtocol * const url_protocols[] = { ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavutil/avconfig.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavutil/avconfig.h +@@ -0,0 +1,6 @@ ++/* Generated by ffmpeg configure */ ++#ifndef AVUTIL_AVCONFIG_H ++#define AVUTIL_AVCONFIG_H ++#define AV_HAVE_BIGENDIAN 0 ++#define AV_HAVE_FAST_UNALIGNED 1 ++#endif /* AVUTIL_AVCONFIG_H */ +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavutil/ffversion.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/ChromeOS/linux/ppc64/libavutil/ffversion.h +@@ -0,0 +1,5 @@ ++/* Automatically generated by version.sh, do not manually edit! */ ++#ifndef AVUTIL_FFVERSION_H ++#define AVUTIL_FFVERSION_H ++#define FFMPEG_VERSION "5.1.git" ++#endif /* AVUTIL_FFVERSION_H */ +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/config.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/config.h +@@ -0,0 +1,760 @@ ++/* Automatically generated by configure - do not modify! 
*/ ++#ifndef FFMPEG_CONFIG_H ++#define FFMPEG_CONFIG_H ++/* #define FFMPEG_CONFIGURATION "--disable-everything --disable-all --disable-doc --disable-htmlpages --disable-manpages --disable-podpages --disable-txtpages --disable-static --enable-avcodec --enable-avformat --enable-avutil --enable-static --enable-libopus --disable-debug --disable-bzlib --disable-error-resilience --disable-iconv --disable-network --disable-schannel --disable-sdl2 --disable-symver --disable-xlib --disable-zlib --disable-securetransport --disable-faan --disable-alsa --disable-autodetect --enable-decoder='vorbis,libopus,flac' --enable-decoder='pcm_u8,pcm_s16le,pcm_s24le,pcm_s32le,pcm_f32le,mp3' --enable-decoder='pcm_s16be,pcm_s24be,pcm_mulaw,pcm_alaw' --enable-demuxer='ogg,matroska,wav,flac,mp3,mov' --enable-parser='opus,vorbis,flac,mpegaudio,vp9' --extra-cflags=-I/CHROMIUM_REBUILD/CHROMIUM_120/NEW/chromium-120.0.6099.62/third_party/opus/src/include --disable-linux-perf --x86asmexe=nasm --optflags='\"-O2\"' --enable-decoder='theora,vp8' --enable-parser='vp3,vp8' --arch=ppc64le --extra-cflags='-mcpu=power8' --enable-pic --cc=clang --cxx=clang++ --ld=clang --extra-ldflags='-fuse-ld=lld'" -- elide long configuration string from binary */ ++#define FFMPEG_LICENSE "LGPL version 2.1 or later" ++#define CONFIG_THIS_YEAR 2023 ++#define FFMPEG_DATADIR "/usr/local/share/ffmpeg" ++#define AVCONV_DATADIR "/usr/local/share/ffmpeg" ++#define CC_IDENT "Debian clang version 16.0.6 (19)" ++#define OS_NAME linux ++#define av_restrict restrict ++#define EXTERN_PREFIX "" ++#define EXTERN_ASM ++#define BUILDSUF "" ++#define SLIBSUF ".so" ++#define HAVE_MMX2 HAVE_MMXEXT ++#define SWS_MAX_FILTER_SIZE 256 ++#define ARCH_AARCH64 0 ++#define ARCH_ALPHA 0 ++#define ARCH_ARM 0 ++#define ARCH_AVR32 0 ++#define ARCH_AVR32_AP 0 ++#define ARCH_AVR32_UC 0 ++#define ARCH_BFIN 0 ++#define ARCH_IA64 0 ++#define ARCH_LOONGARCH 0 ++#define ARCH_LOONGARCH32 0 ++#define ARCH_LOONGARCH64 0 ++#define ARCH_M68K 0 ++#define ARCH_MIPS 
0 ++#define ARCH_MIPS64 0 ++#define ARCH_PARISC 0 ++#define ARCH_PPC 1 ++#define ARCH_PPC64 1 ++#define ARCH_RISCV 0 ++#define ARCH_S390 0 ++#define ARCH_SH4 0 ++#define ARCH_SPARC 0 ++#define ARCH_SPARC64 0 ++#define ARCH_TILEGX 0 ++#define ARCH_TILEPRO 0 ++#define ARCH_X86 0 ++#define ARCH_X86_32 0 ++#define ARCH_X86_64 0 ++#define HAVE_ARMV5TE 0 ++#define HAVE_ARMV6 0 ++#define HAVE_ARMV6T2 0 ++#define HAVE_ARMV8 0 ++#define HAVE_DOTPROD 0 ++#define HAVE_I8MM 0 ++#define HAVE_NEON 0 ++#define HAVE_VFP 0 ++#define HAVE_VFPV3 0 ++#define HAVE_SETEND 0 ++#define HAVE_ALTIVEC 1 ++#define HAVE_DCBZL 1 ++#define HAVE_LDBRX 0 ++#define HAVE_POWER8 1 ++#define HAVE_PPC4XX 0 ++#define HAVE_VSX 1 ++#define HAVE_RVV 0 ++#define HAVE_AESNI 0 ++#define HAVE_AMD3DNOW 0 ++#define HAVE_AMD3DNOWEXT 0 ++#define HAVE_AVX 0 ++#define HAVE_AVX2 0 ++#define HAVE_AVX512 0 ++#define HAVE_AVX512ICL 0 ++#define HAVE_FMA3 0 ++#define HAVE_FMA4 0 ++#define HAVE_MMX 0 ++#define HAVE_MMXEXT 0 ++#define HAVE_SSE 0 ++#define HAVE_SSE2 0 ++#define HAVE_SSE3 0 ++#define HAVE_SSE4 0 ++#define HAVE_SSE42 0 ++#define HAVE_SSSE3 0 ++#define HAVE_XOP 0 ++#define HAVE_CPUNOP 0 ++#define HAVE_I686 0 ++#define HAVE_MIPSFPU 0 ++#define HAVE_MIPS32R2 0 ++#define HAVE_MIPS32R5 0 ++#define HAVE_MIPS64R2 0 ++#define HAVE_MIPS32R6 0 ++#define HAVE_MIPS64R6 0 ++#define HAVE_MIPSDSP 0 ++#define HAVE_MIPSDSPR2 0 ++#define HAVE_MSA 0 ++#define HAVE_LOONGSON2 0 ++#define HAVE_LOONGSON3 0 ++#define HAVE_MMI 0 ++#define HAVE_LSX 0 ++#define HAVE_LASX 0 ++#define HAVE_ARMV5TE_EXTERNAL 0 ++#define HAVE_ARMV6_EXTERNAL 0 ++#define HAVE_ARMV6T2_EXTERNAL 0 ++#define HAVE_ARMV8_EXTERNAL 0 ++#define HAVE_DOTPROD_EXTERNAL 0 ++#define HAVE_I8MM_EXTERNAL 0 ++#define HAVE_NEON_EXTERNAL 0 ++#define HAVE_VFP_EXTERNAL 0 ++#define HAVE_VFPV3_EXTERNAL 0 ++#define HAVE_SETEND_EXTERNAL 0 ++#define HAVE_ALTIVEC_EXTERNAL 0 ++#define HAVE_DCBZL_EXTERNAL 0 ++#define HAVE_LDBRX_EXTERNAL 0 ++#define HAVE_POWER8_EXTERNAL 0 ++#define 
HAVE_PPC4XX_EXTERNAL 0 ++#define HAVE_VSX_EXTERNAL 0 ++#define HAVE_RVV_EXTERNAL 0 ++#define HAVE_AESNI_EXTERNAL 0 ++#define HAVE_AMD3DNOW_EXTERNAL 0 ++#define HAVE_AMD3DNOWEXT_EXTERNAL 0 ++#define HAVE_AVX_EXTERNAL 0 ++#define HAVE_AVX2_EXTERNAL 0 ++#define HAVE_AVX512_EXTERNAL 0 ++#define HAVE_AVX512ICL_EXTERNAL 0 ++#define HAVE_FMA3_EXTERNAL 0 ++#define HAVE_FMA4_EXTERNAL 0 ++#define HAVE_MMX_EXTERNAL 0 ++#define HAVE_MMXEXT_EXTERNAL 0 ++#define HAVE_SSE_EXTERNAL 0 ++#define HAVE_SSE2_EXTERNAL 0 ++#define HAVE_SSE3_EXTERNAL 0 ++#define HAVE_SSE4_EXTERNAL 0 ++#define HAVE_SSE42_EXTERNAL 0 ++#define HAVE_SSSE3_EXTERNAL 0 ++#define HAVE_XOP_EXTERNAL 0 ++#define HAVE_CPUNOP_EXTERNAL 0 ++#define HAVE_I686_EXTERNAL 0 ++#define HAVE_MIPSFPU_EXTERNAL 0 ++#define HAVE_MIPS32R2_EXTERNAL 0 ++#define HAVE_MIPS32R5_EXTERNAL 0 ++#define HAVE_MIPS64R2_EXTERNAL 0 ++#define HAVE_MIPS32R6_EXTERNAL 0 ++#define HAVE_MIPS64R6_EXTERNAL 0 ++#define HAVE_MIPSDSP_EXTERNAL 0 ++#define HAVE_MIPSDSPR2_EXTERNAL 0 ++#define HAVE_MSA_EXTERNAL 0 ++#define HAVE_LOONGSON2_EXTERNAL 0 ++#define HAVE_LOONGSON3_EXTERNAL 0 ++#define HAVE_MMI_EXTERNAL 0 ++#define HAVE_LSX_EXTERNAL 0 ++#define HAVE_LASX_EXTERNAL 0 ++#define HAVE_ARMV5TE_INLINE 0 ++#define HAVE_ARMV6_INLINE 0 ++#define HAVE_ARMV6T2_INLINE 0 ++#define HAVE_ARMV8_INLINE 0 ++#define HAVE_DOTPROD_INLINE 0 ++#define HAVE_I8MM_INLINE 0 ++#define HAVE_NEON_INLINE 0 ++#define HAVE_VFP_INLINE 0 ++#define HAVE_VFPV3_INLINE 0 ++#define HAVE_SETEND_INLINE 0 ++#define HAVE_ALTIVEC_INLINE 0 ++#define HAVE_DCBZL_INLINE 0 ++#define HAVE_LDBRX_INLINE 0 ++#define HAVE_POWER8_INLINE 0 ++#define HAVE_PPC4XX_INLINE 0 ++#define HAVE_VSX_INLINE 0 ++#define HAVE_RVV_INLINE 0 ++#define HAVE_AESNI_INLINE 0 ++#define HAVE_AMD3DNOW_INLINE 0 ++#define HAVE_AMD3DNOWEXT_INLINE 0 ++#define HAVE_AVX_INLINE 0 ++#define HAVE_AVX2_INLINE 0 ++#define HAVE_AVX512_INLINE 0 ++#define HAVE_AVX512ICL_INLINE 0 ++#define HAVE_FMA3_INLINE 0 ++#define HAVE_FMA4_INLINE 0 ++#define 
HAVE_MMX_INLINE 0 ++#define HAVE_MMXEXT_INLINE 0 ++#define HAVE_SSE_INLINE 0 ++#define HAVE_SSE2_INLINE 0 ++#define HAVE_SSE3_INLINE 0 ++#define HAVE_SSE4_INLINE 0 ++#define HAVE_SSE42_INLINE 0 ++#define HAVE_SSSE3_INLINE 0 ++#define HAVE_XOP_INLINE 0 ++#define HAVE_CPUNOP_INLINE 0 ++#define HAVE_I686_INLINE 0 ++#define HAVE_MIPSFPU_INLINE 0 ++#define HAVE_MIPS32R2_INLINE 0 ++#define HAVE_MIPS32R5_INLINE 0 ++#define HAVE_MIPS64R2_INLINE 0 ++#define HAVE_MIPS32R6_INLINE 0 ++#define HAVE_MIPS64R6_INLINE 0 ++#define HAVE_MIPSDSP_INLINE 0 ++#define HAVE_MIPSDSPR2_INLINE 0 ++#define HAVE_MSA_INLINE 0 ++#define HAVE_LOONGSON2_INLINE 0 ++#define HAVE_LOONGSON3_INLINE 0 ++#define HAVE_MMI_INLINE 0 ++#define HAVE_LSX_INLINE 0 ++#define HAVE_LASX_INLINE 0 ++#define HAVE_ALIGNED_STACK 1 ++#define HAVE_FAST_64BIT 1 ++#define HAVE_FAST_CLZ 1 ++#define HAVE_FAST_CMOV 0 ++#define HAVE_FAST_FLOAT16 0 ++#define HAVE_LOCAL_ALIGNED 1 ++#define HAVE_SIMD_ALIGN_16 1 ++#define HAVE_SIMD_ALIGN_32 0 ++#define HAVE_SIMD_ALIGN_64 0 ++#define HAVE_ATOMIC_CAS_PTR 0 ++#define HAVE_MACHINE_RW_BARRIER 0 ++#define HAVE_MEMORYBARRIER 0 ++#define HAVE_MM_EMPTY 0 ++#define HAVE_RDTSC 0 ++#define HAVE_SEM_TIMEDWAIT 1 ++#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1 ++#define HAVE_INLINE_ASM 1 ++#define HAVE_SYMVER 0 ++#define HAVE_X86ASM 0 ++#define HAVE_BIGENDIAN 0 ++#define HAVE_FAST_UNALIGNED 1 ++#define HAVE_ARPA_INET_H 0 ++#define HAVE_ASM_HWCAP_H 0 ++#define HAVE_ASM_TYPES_H 1 ++#define HAVE_CDIO_PARANOIA_H 0 ++#define HAVE_CDIO_PARANOIA_PARANOIA_H 0 ++#define HAVE_CUDA_H 0 ++#define HAVE_DISPATCH_DISPATCH_H 0 ++#define HAVE_DEV_BKTR_IOCTL_BT848_H 0 ++#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0 ++#define HAVE_DEV_IC_BT8XX_H 0 ++#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0 ++#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0 ++#define HAVE_DIRECT_H 0 ++#define HAVE_DIRENT_H 1 ++#define HAVE_DXGIDEBUG_H 0 ++#define HAVE_DXVA_H 0 ++#define HAVE_ES2_GL_H 0 ++#define HAVE_GSM_H 0 ++#define HAVE_IO_H 0 ++#define 
HAVE_LINUX_DMA_BUF_H 0 ++#define HAVE_LINUX_PERF_EVENT_H 1 ++#define HAVE_MACHINE_IOCTL_BT848_H 0 ++#define HAVE_MACHINE_IOCTL_METEOR_H 0 ++#define HAVE_MALLOC_H 1 ++#define HAVE_OPENCV2_CORE_CORE_C_H 0 ++#define HAVE_OPENGL_GL3_H 0 ++#define HAVE_POLL_H 1 ++#define HAVE_SYS_PARAM_H 1 ++#define HAVE_SYS_RESOURCE_H 1 ++#define HAVE_SYS_SELECT_H 1 ++#define HAVE_SYS_SOUNDCARD_H 1 ++#define HAVE_SYS_TIME_H 1 ++#define HAVE_SYS_UN_H 1 ++#define HAVE_SYS_VIDEOIO_H 0 ++#define HAVE_TERMIOS_H 1 ++#define HAVE_UDPLITE_H 0 ++#define HAVE_UNISTD_H 1 ++#define HAVE_VALGRIND_VALGRIND_H 0 /* #define HAVE_VALGRIND_VALGRIND_H 1 -- forced to 0. See https://crbug.com/590440 */ ++#define HAVE_WINDOWS_H 0 ++#define HAVE_WINSOCK2_H 0 ++#define HAVE_INTRINSICS_NEON 0 ++#define HAVE_ATANF 1 ++#define HAVE_ATAN2F 1 ++#define HAVE_CBRT 1 ++#define HAVE_CBRTF 1 ++#define HAVE_COPYSIGN 1 ++#define HAVE_COSF 1 ++#define HAVE_ERF 1 ++#define HAVE_EXP2 1 ++#define HAVE_EXP2F 1 ++#define HAVE_EXPF 1 ++#define HAVE_HYPOT 1 ++#define HAVE_ISFINITE 1 ++#define HAVE_ISINF 1 ++#define HAVE_ISNAN 1 ++#define HAVE_LDEXPF 1 ++#define HAVE_LLRINT 1 ++#define HAVE_LLRINTF 1 ++#define HAVE_LOG2 1 ++#define HAVE_LOG2F 1 ++#define HAVE_LOG10F 1 ++#define HAVE_LRINT 1 ++#define HAVE_LRINTF 1 ++#define HAVE_POWF 1 ++#define HAVE_RINT 1 ++#define HAVE_ROUND 1 ++#define HAVE_ROUNDF 1 ++#define HAVE_SINF 1 ++#define HAVE_TRUNC 1 ++#define HAVE_TRUNCF 1 ++#define HAVE_DOS_PATHS 0 ++#define HAVE_LIBC_MSVCRT 0 ++#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0 ++#define HAVE_SECTION_DATA_REL_RO 1 ++#define HAVE_THREADS 1 ++#define HAVE_UWP 0 ++#define HAVE_WINRT 0 ++#define HAVE_ACCESS 1 ++#define HAVE_ALIGNED_MALLOC 0 ++#define HAVE_ARC4RANDOM_BUF 0 ++#define HAVE_CLOCK_GETTIME 1 ++#define HAVE_CLOSESOCKET 0 ++#define HAVE_COMMANDLINETOARGVW 0 ++#define HAVE_FCNTL 1 ++#define HAVE_GETADDRINFO 0 ++#define HAVE_GETAUXVAL 1 ++#define HAVE_GETENV 1 ++#define HAVE_GETHRTIME 0 ++#define HAVE_GETOPT 1 ++#define 
HAVE_GETMODULEHANDLE 0 ++#define HAVE_GETPROCESSAFFINITYMASK 0 ++#define HAVE_GETPROCESSMEMORYINFO 0 ++#define HAVE_GETPROCESSTIMES 0 ++#define HAVE_GETRUSAGE 1 ++#define HAVE_GETSTDHANDLE 0 ++#define HAVE_GETSYSTEMTIMEASFILETIME 0 ++#define HAVE_GETTIMEOFDAY 1 ++#define HAVE_GLOB 1 ++#define HAVE_GLXGETPROCADDRESS 0 ++#define HAVE_GMTIME_R 1 ++#define HAVE_INET_ATON 0 ++#define HAVE_ISATTY 1 ++#define HAVE_KBHIT 0 ++#define HAVE_LOCALTIME_R 1 ++#define HAVE_LSTAT 1 ++#define HAVE_LZO1X_999_COMPRESS 0 ++#define HAVE_MACH_ABSOLUTE_TIME 0 ++#define HAVE_MAPVIEWOFFILE 0 ++#define HAVE_MEMALIGN 1 ++#define HAVE_MKSTEMP 1 ++#define HAVE_MMAP 1 ++#define HAVE_MPROTECT 1 ++#define HAVE_NANOSLEEP 1 ++#define HAVE_PEEKNAMEDPIPE 0 ++#define HAVE_POSIX_MEMALIGN 1 ++#define HAVE_PRCTL 0 /* #define HAVE_PRCTL 1 -- forced to 0 for Fuchsia */ ++#define HAVE_PTHREAD_CANCEL 1 ++#define HAVE_SCHED_GETAFFINITY 1 ++#define HAVE_SECITEMIMPORT 0 ++#define HAVE_SETCONSOLETEXTATTRIBUTE 0 ++#define HAVE_SETCONSOLECTRLHANDLER 0 ++#define HAVE_SETDLLDIRECTORY 0 ++#define HAVE_SETMODE 0 ++#define HAVE_SETRLIMIT 1 ++#define HAVE_SLEEP 0 ++#define HAVE_STRERROR_R 1 ++#define HAVE_SYSCONF 1 ++#define HAVE_SYSCTL 0 /* #define HAVE_SYSCTL 0 -- forced to 0 for Fuchsia */ ++#define HAVE_SYSCTLBYNAME 0 ++#define HAVE_USLEEP 1 ++#define HAVE_UTGETOSTYPEFROMSTRING 0 ++#define HAVE_VIRTUALALLOC 0 ++#define HAVE_WGLGETPROCADDRESS 0 ++#define HAVE_BCRYPT 0 ++#define HAVE_VAAPI_DRM 0 ++#define HAVE_VAAPI_X11 0 ++#define HAVE_VAAPI_WIN32 0 ++#define HAVE_VDPAU_X11 0 ++#define HAVE_PTHREADS 1 ++#define HAVE_OS2THREADS 0 ++#define HAVE_W32THREADS 0 ++#define HAVE_AS_ARCH_DIRECTIVE 0 ++#define HAVE_AS_ARCHEXT_DOTPROD_DIRECTIVE 0 ++#define HAVE_AS_ARCHEXT_I8MM_DIRECTIVE 0 ++#define HAVE_AS_DN_DIRECTIVE 0 ++#define HAVE_AS_FPU_DIRECTIVE 0 ++#define HAVE_AS_FUNC 0 ++#define HAVE_AS_OBJECT_ARCH 0 ++#define HAVE_ASM_MOD_Q 0 ++#define HAVE_BLOCKS_EXTENSION 0 ++#define HAVE_EBP_AVAILABLE 0 ++#define 
HAVE_EBX_AVAILABLE 0 ++#define HAVE_GNU_AS 0 ++#define HAVE_GNU_WINDRES 0 ++#define HAVE_IBM_ASM 1 ++#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0 ++#define HAVE_INLINE_ASM_LABELS 1 ++#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1 ++#define HAVE_PRAGMA_DEPRECATED 1 ++#define HAVE_RSYNC_CONTIMEOUT 1 ++#define HAVE_SYMVER_ASM_LABEL 1 ++#define HAVE_SYMVER_GNU_ASM 1 ++#define HAVE_VFP_ARGS 0 ++#define HAVE_XFORM_ASM 1 ++#define HAVE_XMM_CLOBBERS 0 ++#define HAVE_DPI_AWARENESS_CONTEXT 0 ++#define HAVE_IDXGIOUTPUT5 0 ++#define HAVE_KCMVIDEOCODECTYPE_HEVC 0 ++#define HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA 0 ++#define HAVE_KCMVIDEOCODECTYPE_VP9 0 ++#define HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_422YPCBCR16BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE 0 ++#define HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR 0 ++#define HAVE_KCVIMAGEBUFFERYCBCRMATRIX_ITU_R_2020 0 ++#define HAVE_KCVIMAGEBUFFERCOLORPRIMARIES_ITU_R_2020 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2020 0 ++#define HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_428_1 0 ++#define HAVE_SOCKLEN_T 0 ++#define HAVE_STRUCT_ADDRINFO 0 ++#define HAVE_STRUCT_GROUP_SOURCE_REQ 0 ++#define HAVE_STRUCT_IP_MREQ_SOURCE 0 ++#define HAVE_STRUCT_IPV6_MREQ 0 ++#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 0 ++#define HAVE_STRUCT_POLLFD 0 ++#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1 ++#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0 ++#define HAVE_STRUCT_SOCKADDR_IN6 0 ++#define HAVE_STRUCT_SOCKADDR_SA_LEN 0 ++#define HAVE_STRUCT_SOCKADDR_STORAGE 0 ++#define 
HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1 ++#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0 ++#define HAVE_GZIP 1 ++#define HAVE_LIBDRM_GETFB2 0 ++#define HAVE_MAKEINFO 0 ++#define HAVE_MAKEINFO_HTML 0 ++#define HAVE_OPENCL_D3D11 0 ++#define HAVE_OPENCL_DRM_ARM 0 ++#define HAVE_OPENCL_DRM_BEIGNET 0 ++#define HAVE_OPENCL_DXVA2 0 ++#define HAVE_OPENCL_VAAPI_BEIGNET 0 ++#define HAVE_OPENCL_VAAPI_INTEL_MEDIA 0 ++#define HAVE_PERL 1 ++#define HAVE_POD2MAN 1 ++#define HAVE_TEXI2HTML 0 ++#define HAVE_XMLLINT 1 ++#define HAVE_ZLIB_GZIP 0 ++#define HAVE_OPENVINO2 0 ++#define CONFIG_DOC 0 ++#define CONFIG_HTMLPAGES 0 ++#define CONFIG_MANPAGES 0 ++#define CONFIG_PODPAGES 0 ++#define CONFIG_TXTPAGES 0 ++#define CONFIG_AVIO_HTTP_SERVE_FILES_EXAMPLE 1 ++#define CONFIG_AVIO_LIST_DIR_EXAMPLE 1 ++#define CONFIG_AVIO_READ_CALLBACK_EXAMPLE 1 ++#define CONFIG_DECODE_AUDIO_EXAMPLE 1 ++#define CONFIG_DECODE_FILTER_AUDIO_EXAMPLE 0 ++#define CONFIG_DECODE_FILTER_VIDEO_EXAMPLE 0 ++#define CONFIG_DECODE_VIDEO_EXAMPLE 1 ++#define CONFIG_DEMUX_DECODE_EXAMPLE 1 ++#define CONFIG_ENCODE_AUDIO_EXAMPLE 1 ++#define CONFIG_ENCODE_VIDEO_EXAMPLE 1 ++#define CONFIG_EXTRACT_MVS_EXAMPLE 1 ++#define CONFIG_FILTER_AUDIO_EXAMPLE 0 ++#define CONFIG_HW_DECODE_EXAMPLE 1 ++#define CONFIG_MUX_EXAMPLE 0 ++#define CONFIG_QSV_DECODE_EXAMPLE 0 ++#define CONFIG_REMUX_EXAMPLE 1 ++#define CONFIG_RESAMPLE_AUDIO_EXAMPLE 0 ++#define CONFIG_SCALE_VIDEO_EXAMPLE 0 ++#define CONFIG_SHOW_METADATA_EXAMPLE 1 ++#define CONFIG_TRANSCODE_AAC_EXAMPLE 0 ++#define CONFIG_TRANSCODE_EXAMPLE 0 ++#define CONFIG_VAAPI_ENCODE_EXAMPLE 0 ++#define CONFIG_VAAPI_TRANSCODE_EXAMPLE 0 ++#define CONFIG_QSV_TRANSCODE_EXAMPLE 0 ++#define CONFIG_AVISYNTH 0 ++#define CONFIG_FREI0R 0 ++#define CONFIG_LIBCDIO 0 ++#define CONFIG_LIBDAVS2 0 ++#define CONFIG_LIBRUBBERBAND 0 ++#define CONFIG_LIBVIDSTAB 0 ++#define CONFIG_LIBX264 0 ++#define CONFIG_LIBX265 0 ++#define CONFIG_LIBXAVS 0 ++#define CONFIG_LIBXAVS2 0 ++#define CONFIG_LIBXVID 0 ++#define CONFIG_DECKLINK 0 
++#define CONFIG_LIBFDK_AAC 0 ++#define CONFIG_LIBTLS 0 ++#define CONFIG_GMP 0 ++#define CONFIG_LIBARIBB24 0 ++#define CONFIG_LIBLENSFUN 0 ++#define CONFIG_LIBOPENCORE_AMRNB 0 ++#define CONFIG_LIBOPENCORE_AMRWB 0 ++#define CONFIG_LIBVO_AMRWBENC 0 ++#define CONFIG_MBEDTLS 0 ++#define CONFIG_RKMPP 0 ++#define CONFIG_LIBSMBCLIENT 0 ++#define CONFIG_CHROMAPRINT 0 ++#define CONFIG_GCRYPT 0 ++#define CONFIG_GNUTLS 0 ++#define CONFIG_JNI 0 ++#define CONFIG_LADSPA 0 ++#define CONFIG_LCMS2 0 ++#define CONFIG_LIBAOM 0 ++#define CONFIG_LIBARIBCAPTION 0 ++#define CONFIG_LIBASS 0 ++#define CONFIG_LIBBLURAY 0 ++#define CONFIG_LIBBS2B 0 ++#define CONFIG_LIBCACA 0 ++#define CONFIG_LIBCELT 0 ++#define CONFIG_LIBCODEC2 0 ++#define CONFIG_LIBDAV1D 0 ++#define CONFIG_LIBDC1394 0 ++#define CONFIG_LIBDRM 0 ++#define CONFIG_LIBFLITE 0 ++#define CONFIG_LIBFONTCONFIG 0 ++#define CONFIG_LIBFREETYPE 0 ++#define CONFIG_LIBFRIBIDI 0 ++#define CONFIG_LIBHARFBUZZ 0 ++#define CONFIG_LIBGLSLANG 0 ++#define CONFIG_LIBGME 0 ++#define CONFIG_LIBGSM 0 ++#define CONFIG_LIBIEC61883 0 ++#define CONFIG_LIBILBC 0 ++#define CONFIG_LIBJACK 0 ++#define CONFIG_LIBJXL 0 ++#define CONFIG_LIBKLVANC 0 ++#define CONFIG_LIBKVAZAAR 0 ++#define CONFIG_LIBMODPLUG 0 ++#define CONFIG_LIBMP3LAME 0 ++#define CONFIG_LIBMYSOFA 0 ++#define CONFIG_LIBOPENCV 0 ++#define CONFIG_LIBOPENH264 0 ++#define CONFIG_LIBOPENJPEG 0 ++#define CONFIG_LIBOPENMPT 0 ++#define CONFIG_LIBOPENVINO 0 ++#define CONFIG_LIBOPUS 1 ++#define CONFIG_LIBPLACEBO 0 ++#define CONFIG_LIBPULSE 0 ++#define CONFIG_LIBRABBITMQ 0 ++#define CONFIG_LIBRAV1E 0 ++#define CONFIG_LIBRIST 0 ++#define CONFIG_LIBRSVG 0 ++#define CONFIG_LIBRTMP 0 ++#define CONFIG_LIBSHADERC 0 ++#define CONFIG_LIBSHINE 0 ++#define CONFIG_LIBSMBCLIENT 0 ++#define CONFIG_LIBSNAPPY 0 ++#define CONFIG_LIBSOXR 0 ++#define CONFIG_LIBSPEEX 0 ++#define CONFIG_LIBSRT 0 ++#define CONFIG_LIBSSH 0 ++#define CONFIG_LIBSVTAV1 0 ++#define CONFIG_LIBTENSORFLOW 0 ++#define CONFIG_LIBTESSERACT 0 ++#define 
CONFIG_LIBTHEORA 0 ++#define CONFIG_LIBTWOLAME 0 ++#define CONFIG_LIBUAVS3D 0 ++#define CONFIG_LIBV4L2 0 ++#define CONFIG_LIBVMAF 0 ++#define CONFIG_LIBVORBIS 0 ++#define CONFIG_LIBVPX 0 ++#define CONFIG_LIBWEBP 0 ++#define CONFIG_LIBXML2 0 ++#define CONFIG_LIBZIMG 0 ++#define CONFIG_LIBZMQ 0 ++#define CONFIG_LIBZVBI 0 ++#define CONFIG_LV2 0 ++#define CONFIG_MEDIACODEC 0 ++#define CONFIG_OPENAL 0 ++#define CONFIG_OPENGL 0 ++#define CONFIG_OPENSSL 0 ++#define CONFIG_POCKETSPHINX 0 ++#define CONFIG_VAPOURSYNTH 0 ++#define CONFIG_ALSA 0 ++#define CONFIG_APPKIT 0 ++#define CONFIG_AVFOUNDATION 0 ++#define CONFIG_BZLIB 0 ++#define CONFIG_COREIMAGE 0 ++#define CONFIG_ICONV 0 ++#define CONFIG_LIBXCB 0 ++#define CONFIG_LIBXCB_SHM 0 ++#define CONFIG_LIBXCB_SHAPE 0 ++#define CONFIG_LIBXCB_XFIXES 0 ++#define CONFIG_LZMA 0 ++#define CONFIG_MEDIAFOUNDATION 0 ++#define CONFIG_METAL 0 ++#define CONFIG_SCHANNEL 0 ++#define CONFIG_SDL2 0 ++#define CONFIG_SECURETRANSPORT 0 ++#define CONFIG_SNDIO 0 ++#define CONFIG_XLIB 0 ++#define CONFIG_ZLIB 0 ++#define CONFIG_CUDA_NVCC 0 ++#define CONFIG_CUDA_SDK 0 ++#define CONFIG_LIBNPP 0 ++#define CONFIG_LIBMFX 0 ++#define CONFIG_LIBVPL 0 ++#define CONFIG_MMAL 0 ++#define CONFIG_OMX 0 ++#define CONFIG_OPENCL 0 ++#define CONFIG_AMF 0 ++#define CONFIG_AUDIOTOOLBOX 0 ++#define CONFIG_CRYSTALHD 0 ++#define CONFIG_CUDA 0 ++#define CONFIG_CUDA_LLVM 0 ++#define CONFIG_CUVID 0 ++#define CONFIG_D3D11VA 0 ++#define CONFIG_DXVA2 0 ++#define CONFIG_FFNVCODEC 0 ++#define CONFIG_NVDEC 0 ++#define CONFIG_NVENC 0 ++#define CONFIG_VAAPI 0 ++#define CONFIG_VDPAU 0 ++#define CONFIG_VIDEOTOOLBOX 0 ++#define CONFIG_VULKAN 0 ++#define CONFIG_V4L2_M2M 0 ++#define CONFIG_FTRAPV 0 ++#define CONFIG_GRAY 0 ++#define CONFIG_HARDCODED_TABLES 0 ++#define CONFIG_OMX_RPI 0 ++#define CONFIG_RUNTIME_CPUDETECT 1 ++#define CONFIG_SAFE_BITSTREAM_READER 1 ++#define CONFIG_SHARED 0 ++#define CONFIG_SMALL 0 ++#define CONFIG_STATIC 1 ++#define CONFIG_SWSCALE_ALPHA 1 ++#define 
CONFIG_GPL 0 ++#define CONFIG_NONFREE 0 ++#define CONFIG_VERSION3 0 ++#define CONFIG_AVDEVICE 0 ++#define CONFIG_AVFILTER 0 ++#define CONFIG_SWSCALE 0 ++#define CONFIG_POSTPROC 0 ++#define CONFIG_AVFORMAT 1 ++#define CONFIG_AVCODEC 1 ++#define CONFIG_SWRESAMPLE 0 ++#define CONFIG_AVUTIL 1 ++#define CONFIG_FFPLAY 0 ++#define CONFIG_FFPROBE 0 ++#define CONFIG_FFMPEG 0 ++#define CONFIG_DWT 0 ++#define CONFIG_ERROR_RESILIENCE 0 ++#define CONFIG_FAAN 0 ++#define CONFIG_FAST_UNALIGNED 1 ++#define CONFIG_LSP 0 ++#define CONFIG_PIXELUTILS 0 ++#define CONFIG_NETWORK 0 ++#define CONFIG_AUTODETECT 0 ++#define CONFIG_FONTCONFIG 0 ++#define CONFIG_LARGE_TESTS 1 ++#define CONFIG_LINUX_PERF 0 ++#define CONFIG_MACOS_KPERF 0 ++#define CONFIG_MEMORY_POISONING 0 ++#define CONFIG_NEON_CLOBBER_TEST 0 ++#define CONFIG_OSSFUZZ 0 ++#define CONFIG_PIC 1 ++#define CONFIG_PTX_COMPRESSION 0 ++#define CONFIG_THUMB 0 ++#define CONFIG_VALGRIND_BACKTRACE 0 ++#define CONFIG_XMM_CLOBBER_TEST 0 ++#define CONFIG_BSFS 0 ++#define CONFIG_DECODERS 1 ++#define CONFIG_ENCODERS 0 ++#define CONFIG_HWACCELS 0 ++#define CONFIG_PARSERS 1 ++#define CONFIG_INDEVS 0 ++#define CONFIG_OUTDEVS 0 ++#define CONFIG_FILTERS 0 ++#define CONFIG_DEMUXERS 1 ++#define CONFIG_MUXERS 0 ++#define CONFIG_PROTOCOLS 0 ++#define CONFIG_AANDCTTABLES 0 ++#define CONFIG_AC3DSP 0 ++#define CONFIG_ADTS_HEADER 0 ++#define CONFIG_ATSC_A53 0 ++#define CONFIG_AUDIO_FRAME_QUEUE 0 ++#define CONFIG_AUDIODSP 0 ++#define CONFIG_BLOCKDSP 0 ++#define CONFIG_BSWAPDSP 0 ++#define CONFIG_CABAC 0 ++#define CONFIG_CBS 0 ++#define CONFIG_CBS_AV1 0 ++#define CONFIG_CBS_H264 0 ++#define CONFIG_CBS_H265 0 ++#define CONFIG_CBS_H266 0 ++#define CONFIG_CBS_JPEG 0 ++#define CONFIG_CBS_MPEG2 0 ++#define CONFIG_CBS_VP9 0 ++#define CONFIG_DEFLATE_WRAPPER 0 ++#define CONFIG_DIRAC_PARSE 1 ++#define CONFIG_DNN 0 ++#define CONFIG_DOVI_RPU 0 ++#define CONFIG_DVPROFILE 0 ++#define CONFIG_EVCPARSE 0 ++#define CONFIG_EXIF 0 ++#define CONFIG_FAANDCT 0 ++#define 
CONFIG_FAANIDCT 0 ++#define CONFIG_FDCTDSP 0 ++#define CONFIG_FMTCONVERT 0 ++#define CONFIG_FRAME_THREAD_ENCODER 0 ++#define CONFIG_G722DSP 0 ++#define CONFIG_GOLOMB 1 ++#define CONFIG_GPLV3 0 ++#define CONFIG_H263DSP 0 ++#define CONFIG_H264CHROMA 0 ++#define CONFIG_H264DSP 0 ++#define CONFIG_H264PARSE 0 ++#define CONFIG_H264PRED 1 ++#define CONFIG_H264QPEL 0 ++#define CONFIG_H264_SEI 0 ++#define CONFIG_HEVCPARSE 0 ++#define CONFIG_HEVC_SEI 0 ++#define CONFIG_HPELDSP 1 ++#define CONFIG_HUFFMAN 0 ++#define CONFIG_HUFFYUVDSP 0 ++#define CONFIG_HUFFYUVENCDSP 0 ++#define CONFIG_IDCTDSP 0 ++#define CONFIG_IIRFILTER 0 ++#define CONFIG_INFLATE_WRAPPER 0 ++#define CONFIG_INTRAX8 0 ++#define CONFIG_ISO_MEDIA 1 ++#define CONFIG_IVIDSP 0 ++#define CONFIG_JPEGTABLES 0 ++#define CONFIG_LGPLV3 0 ++#define CONFIG_LIBX262 0 ++#define CONFIG_LLAUDDSP 0 ++#define CONFIG_LLVIDDSP 0 ++#define CONFIG_LLVIDENCDSP 0 ++#define CONFIG_LPC 0 ++#define CONFIG_LZF 0 ++#define CONFIG_ME_CMP 0 ++#define CONFIG_MPEG_ER 0 ++#define CONFIG_MPEGAUDIO 1 ++#define CONFIG_MPEGAUDIODSP 1 ++#define CONFIG_MPEGAUDIOHEADER 1 ++#define CONFIG_MPEG4AUDIO 1 ++#define CONFIG_MPEGVIDEO 0 ++#define CONFIG_MPEGVIDEODEC 0 ++#define CONFIG_MPEGVIDEOENC 0 ++#define CONFIG_MSMPEG4DEC 0 ++#define CONFIG_MSMPEG4ENC 0 ++#define CONFIG_MSS34DSP 0 ++#define CONFIG_PIXBLOCKDSP 0 ++#define CONFIG_QPELDSP 0 ++#define CONFIG_QSV 0 ++#define CONFIG_QSVDEC 0 ++#define CONFIG_QSVENC 0 ++#define CONFIG_QSVVPP 0 ++#define CONFIG_RANGECODER 0 ++#define CONFIG_RIFFDEC 1 ++#define CONFIG_RIFFENC 0 ++#define CONFIG_RTPDEC 0 ++#define CONFIG_RTPENC_CHAIN 0 ++#define CONFIG_RV34DSP 0 ++#define CONFIG_SCENE_SAD 0 ++#define CONFIG_SINEWIN 0 ++#define CONFIG_SNAPPY 0 ++#define CONFIG_SRTP 0 ++#define CONFIG_STARTCODE 0 ++#define CONFIG_TEXTUREDSP 0 ++#define CONFIG_TEXTUREDSPENC 0 ++#define CONFIG_TPELDSP 0 ++#define CONFIG_VAAPI_1 0 ++#define CONFIG_VAAPI_ENCODE 0 ++#define CONFIG_VC1DSP 0 ++#define CONFIG_VIDEODSP 1 ++#define 
CONFIG_VP3DSP 1 ++#define CONFIG_VP56DSP 0 ++#define CONFIG_VP8DSP 1 ++#define CONFIG_WMA_FREQS 0 ++#define CONFIG_WMV2DSP 0 ++#endif /* FFMPEG_CONFIG_H */ +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/config_components.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/config_components.h +@@ -0,0 +1,2196 @@ ++/* Automatically generated by configure - do not modify! */ ++#ifndef FFMPEG_CONFIG_COMPONENTS_H ++#define FFMPEG_CONFIG_COMPONENTS_H ++#define CONFIG_AAC_ADTSTOASC_BSF 0 ++#define CONFIG_AV1_FRAME_MERGE_BSF 0 ++#define CONFIG_AV1_FRAME_SPLIT_BSF 0 ++#define CONFIG_AV1_METADATA_BSF 0 ++#define CONFIG_CHOMP_BSF 0 ++#define CONFIG_DUMP_EXTRADATA_BSF 0 ++#define CONFIG_DCA_CORE_BSF 0 ++#define CONFIG_DTS2PTS_BSF 0 ++#define CONFIG_DV_ERROR_MARKER_BSF 0 ++#define CONFIG_EAC3_CORE_BSF 0 ++#define CONFIG_EXTRACT_EXTRADATA_BSF 0 ++#define CONFIG_FILTER_UNITS_BSF 0 ++#define CONFIG_H264_METADATA_BSF 0 ++#define CONFIG_H264_MP4TOANNEXB_BSF 0 ++#define CONFIG_H264_REDUNDANT_PPS_BSF 0 ++#define CONFIG_HAPQA_EXTRACT_BSF 0 ++#define CONFIG_HEVC_METADATA_BSF 0 ++#define CONFIG_HEVC_MP4TOANNEXB_BSF 0 ++#define CONFIG_IMX_DUMP_HEADER_BSF 0 ++#define CONFIG_MEDIA100_TO_MJPEGB_BSF 0 ++#define CONFIG_MJPEG2JPEG_BSF 0 ++#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0 ++#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0 ++#define CONFIG_MPEG2_METADATA_BSF 0 ++#define CONFIG_MPEG4_UNPACK_BFRAMES_BSF 0 ++#define CONFIG_MOV2TEXTSUB_BSF 0 ++#define CONFIG_NOISE_BSF 0 ++#define CONFIG_NULL_BSF 0 ++#define CONFIG_OPUS_METADATA_BSF 0 ++#define CONFIG_PCM_RECHUNK_BSF 0 ++#define CONFIG_PGS_FRAME_MERGE_BSF 0 ++#define CONFIG_PRORES_METADATA_BSF 0 ++#define CONFIG_REMOVE_EXTRADATA_BSF 0 ++#define CONFIG_SETTS_BSF 0 ++#define CONFIG_TEXT2MOVSUB_BSF 0 ++#define CONFIG_TRACE_HEADERS_BSF 0 ++#define CONFIG_TRUEHD_CORE_BSF 0 ++#define 
CONFIG_VP9_METADATA_BSF 0 ++#define CONFIG_VP9_RAW_REORDER_BSF 0 ++#define CONFIG_VP9_SUPERFRAME_BSF 0 ++#define CONFIG_VP9_SUPERFRAME_SPLIT_BSF 0 ++#define CONFIG_VVC_METADATA_BSF 0 ++#define CONFIG_VVC_MP4TOANNEXB_BSF 0 ++#define CONFIG_EVC_FRAME_MERGE_BSF 0 ++#define CONFIG_AASC_DECODER 0 ++#define CONFIG_AIC_DECODER 0 ++#define CONFIG_ALIAS_PIX_DECODER 0 ++#define CONFIG_AGM_DECODER 0 ++#define CONFIG_AMV_DECODER 0 ++#define CONFIG_ANM_DECODER 0 ++#define CONFIG_ANSI_DECODER 0 ++#define CONFIG_APNG_DECODER 0 ++#define CONFIG_ARBC_DECODER 0 ++#define CONFIG_ARGO_DECODER 0 ++#define CONFIG_ASV1_DECODER 0 ++#define CONFIG_ASV2_DECODER 0 ++#define CONFIG_AURA_DECODER 0 ++#define CONFIG_AURA2_DECODER 0 ++#define CONFIG_AVRP_DECODER 0 ++#define CONFIG_AVRN_DECODER 0 ++#define CONFIG_AVS_DECODER 0 ++#define CONFIG_AVUI_DECODER 0 ++#define CONFIG_AYUV_DECODER 0 ++#define CONFIG_BETHSOFTVID_DECODER 0 ++#define CONFIG_BFI_DECODER 0 ++#define CONFIG_BINK_DECODER 0 ++#define CONFIG_BITPACKED_DECODER 0 ++#define CONFIG_BMP_DECODER 0 ++#define CONFIG_BMV_VIDEO_DECODER 0 ++#define CONFIG_BRENDER_PIX_DECODER 0 ++#define CONFIG_C93_DECODER 0 ++#define CONFIG_CAVS_DECODER 0 ++#define CONFIG_CDGRAPHICS_DECODER 0 ++#define CONFIG_CDTOONS_DECODER 0 ++#define CONFIG_CDXL_DECODER 0 ++#define CONFIG_CFHD_DECODER 0 ++#define CONFIG_CINEPAK_DECODER 0 ++#define CONFIG_CLEARVIDEO_DECODER 0 ++#define CONFIG_CLJR_DECODER 0 ++#define CONFIG_CLLC_DECODER 0 ++#define CONFIG_COMFORTNOISE_DECODER 0 ++#define CONFIG_CPIA_DECODER 0 ++#define CONFIG_CRI_DECODER 0 ++#define CONFIG_CSCD_DECODER 0 ++#define CONFIG_CYUV_DECODER 0 ++#define CONFIG_DDS_DECODER 0 ++#define CONFIG_DFA_DECODER 0 ++#define CONFIG_DIRAC_DECODER 0 ++#define CONFIG_DNXHD_DECODER 0 ++#define CONFIG_DPX_DECODER 0 ++#define CONFIG_DSICINVIDEO_DECODER 0 ++#define CONFIG_DVAUDIO_DECODER 0 ++#define CONFIG_DVVIDEO_DECODER 0 ++#define CONFIG_DXA_DECODER 0 ++#define CONFIG_DXTORY_DECODER 0 ++#define CONFIG_DXV_DECODER 0 ++#define 
CONFIG_EACMV_DECODER 0 ++#define CONFIG_EAMAD_DECODER 0 ++#define CONFIG_EATGQ_DECODER 0 ++#define CONFIG_EATGV_DECODER 0 ++#define CONFIG_EATQI_DECODER 0 ++#define CONFIG_EIGHTBPS_DECODER 0 ++#define CONFIG_EIGHTSVX_EXP_DECODER 0 ++#define CONFIG_EIGHTSVX_FIB_DECODER 0 ++#define CONFIG_ESCAPE124_DECODER 0 ++#define CONFIG_ESCAPE130_DECODER 0 ++#define CONFIG_EXR_DECODER 0 ++#define CONFIG_FFV1_DECODER 0 ++#define CONFIG_FFVHUFF_DECODER 0 ++#define CONFIG_FIC_DECODER 0 ++#define CONFIG_FITS_DECODER 0 ++#define CONFIG_FLASHSV_DECODER 0 ++#define CONFIG_FLASHSV2_DECODER 0 ++#define CONFIG_FLIC_DECODER 0 ++#define CONFIG_FLV_DECODER 0 ++#define CONFIG_FMVC_DECODER 0 ++#define CONFIG_FOURXM_DECODER 0 ++#define CONFIG_FRAPS_DECODER 0 ++#define CONFIG_FRWU_DECODER 0 ++#define CONFIG_G2M_DECODER 0 ++#define CONFIG_GDV_DECODER 0 ++#define CONFIG_GEM_DECODER 0 ++#define CONFIG_GIF_DECODER 0 ++#define CONFIG_H261_DECODER 0 ++#define CONFIG_H263_DECODER 0 ++#define CONFIG_H263I_DECODER 0 ++#define CONFIG_H263P_DECODER 0 ++#define CONFIG_H263_V4L2M2M_DECODER 0 ++#define CONFIG_H264_DECODER 0 ++#define CONFIG_H264_CRYSTALHD_DECODER 0 ++#define CONFIG_H264_V4L2M2M_DECODER 0 ++#define CONFIG_H264_MEDIACODEC_DECODER 0 ++#define CONFIG_H264_MMAL_DECODER 0 ++#define CONFIG_H264_QSV_DECODER 0 ++#define CONFIG_H264_RKMPP_DECODER 0 ++#define CONFIG_HAP_DECODER 0 ++#define CONFIG_HEVC_DECODER 0 ++#define CONFIG_HEVC_QSV_DECODER 0 ++#define CONFIG_HEVC_RKMPP_DECODER 0 ++#define CONFIG_HEVC_V4L2M2M_DECODER 0 ++#define CONFIG_HNM4_VIDEO_DECODER 0 ++#define CONFIG_HQ_HQA_DECODER 0 ++#define CONFIG_HQX_DECODER 0 ++#define CONFIG_HUFFYUV_DECODER 0 ++#define CONFIG_HYMT_DECODER 0 ++#define CONFIG_IDCIN_DECODER 0 ++#define CONFIG_IFF_ILBM_DECODER 0 ++#define CONFIG_IMM4_DECODER 0 ++#define CONFIG_IMM5_DECODER 0 ++#define CONFIG_INDEO2_DECODER 0 ++#define CONFIG_INDEO3_DECODER 0 ++#define CONFIG_INDEO4_DECODER 0 ++#define CONFIG_INDEO5_DECODER 0 ++#define CONFIG_INTERPLAY_VIDEO_DECODER 0 
++#define CONFIG_IPU_DECODER 0 ++#define CONFIG_JPEG2000_DECODER 0 ++#define CONFIG_JPEGLS_DECODER 0 ++#define CONFIG_JV_DECODER 0 ++#define CONFIG_KGV1_DECODER 0 ++#define CONFIG_KMVC_DECODER 0 ++#define CONFIG_LAGARITH_DECODER 0 ++#define CONFIG_LOCO_DECODER 0 ++#define CONFIG_LSCR_DECODER 0 ++#define CONFIG_M101_DECODER 0 ++#define CONFIG_MAGICYUV_DECODER 0 ++#define CONFIG_MDEC_DECODER 0 ++#define CONFIG_MEDIA100_DECODER 0 ++#define CONFIG_MIMIC_DECODER 0 ++#define CONFIG_MJPEG_DECODER 0 ++#define CONFIG_MJPEGB_DECODER 0 ++#define CONFIG_MMVIDEO_DECODER 0 ++#define CONFIG_MOBICLIP_DECODER 0 ++#define CONFIG_MOTIONPIXELS_DECODER 0 ++#define CONFIG_MPEG1VIDEO_DECODER 0 ++#define CONFIG_MPEG2VIDEO_DECODER 0 ++#define CONFIG_MPEG4_DECODER 0 ++#define CONFIG_MPEG4_CRYSTALHD_DECODER 0 ++#define CONFIG_MPEG4_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG4_MMAL_DECODER 0 ++#define CONFIG_MPEGVIDEO_DECODER 0 ++#define CONFIG_MPEG1_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG2_MMAL_DECODER 0 ++#define CONFIG_MPEG2_CRYSTALHD_DECODER 0 ++#define CONFIG_MPEG2_V4L2M2M_DECODER 0 ++#define CONFIG_MPEG2_QSV_DECODER 0 ++#define CONFIG_MPEG2_MEDIACODEC_DECODER 0 ++#define CONFIG_MSA1_DECODER 0 ++#define CONFIG_MSCC_DECODER 0 ++#define CONFIG_MSMPEG4V1_DECODER 0 ++#define CONFIG_MSMPEG4V2_DECODER 0 ++#define CONFIG_MSMPEG4V3_DECODER 0 ++#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0 ++#define CONFIG_MSP2_DECODER 0 ++#define CONFIG_MSRLE_DECODER 0 ++#define CONFIG_MSS1_DECODER 0 ++#define CONFIG_MSS2_DECODER 0 ++#define CONFIG_MSVIDEO1_DECODER 0 ++#define CONFIG_MSZH_DECODER 0 ++#define CONFIG_MTS2_DECODER 0 ++#define CONFIG_MV30_DECODER 0 ++#define CONFIG_MVC1_DECODER 0 ++#define CONFIG_MVC2_DECODER 0 ++#define CONFIG_MVDV_DECODER 0 ++#define CONFIG_MVHA_DECODER 0 ++#define CONFIG_MWSC_DECODER 0 ++#define CONFIG_MXPEG_DECODER 0 ++#define CONFIG_NOTCHLC_DECODER 0 ++#define CONFIG_NUV_DECODER 0 ++#define CONFIG_PAF_VIDEO_DECODER 0 ++#define CONFIG_PAM_DECODER 0 ++#define CONFIG_PBM_DECODER 0 
++#define CONFIG_PCX_DECODER 0 ++#define CONFIG_PDV_DECODER 0 ++#define CONFIG_PFM_DECODER 0 ++#define CONFIG_PGM_DECODER 0 ++#define CONFIG_PGMYUV_DECODER 0 ++#define CONFIG_PGX_DECODER 0 ++#define CONFIG_PHM_DECODER 0 ++#define CONFIG_PHOTOCD_DECODER 0 ++#define CONFIG_PICTOR_DECODER 0 ++#define CONFIG_PIXLET_DECODER 0 ++#define CONFIG_PNG_DECODER 0 ++#define CONFIG_PPM_DECODER 0 ++#define CONFIG_PRORES_DECODER 0 ++#define CONFIG_PROSUMER_DECODER 0 ++#define CONFIG_PSD_DECODER 0 ++#define CONFIG_PTX_DECODER 0 ++#define CONFIG_QDRAW_DECODER 0 ++#define CONFIG_QOI_DECODER 0 ++#define CONFIG_QPEG_DECODER 0 ++#define CONFIG_QTRLE_DECODER 0 ++#define CONFIG_R10K_DECODER 0 ++#define CONFIG_R210_DECODER 0 ++#define CONFIG_RASC_DECODER 0 ++#define CONFIG_RAWVIDEO_DECODER 0 ++#define CONFIG_RKA_DECODER 0 ++#define CONFIG_RL2_DECODER 0 ++#define CONFIG_ROQ_DECODER 0 ++#define CONFIG_RPZA_DECODER 0 ++#define CONFIG_RSCC_DECODER 0 ++#define CONFIG_RTV1_DECODER 0 ++#define CONFIG_RV10_DECODER 0 ++#define CONFIG_RV20_DECODER 0 ++#define CONFIG_RV30_DECODER 0 ++#define CONFIG_RV40_DECODER 0 ++#define CONFIG_S302M_DECODER 0 ++#define CONFIG_SANM_DECODER 0 ++#define CONFIG_SCPR_DECODER 0 ++#define CONFIG_SCREENPRESSO_DECODER 0 ++#define CONFIG_SGA_DECODER 0 ++#define CONFIG_SGI_DECODER 0 ++#define CONFIG_SGIRLE_DECODER 0 ++#define CONFIG_SHEERVIDEO_DECODER 0 ++#define CONFIG_SIMBIOSIS_IMX_DECODER 0 ++#define CONFIG_SMACKER_DECODER 0 ++#define CONFIG_SMC_DECODER 0 ++#define CONFIG_SMVJPEG_DECODER 0 ++#define CONFIG_SNOW_DECODER 0 ++#define CONFIG_SP5X_DECODER 0 ++#define CONFIG_SPEEDHQ_DECODER 0 ++#define CONFIG_SPEEX_DECODER 0 ++#define CONFIG_SRGC_DECODER 0 ++#define CONFIG_SUNRAST_DECODER 0 ++#define CONFIG_SVQ1_DECODER 0 ++#define CONFIG_SVQ3_DECODER 0 ++#define CONFIG_TARGA_DECODER 0 ++#define CONFIG_TARGA_Y216_DECODER 0 ++#define CONFIG_TDSC_DECODER 0 ++#define CONFIG_THEORA_DECODER 1 ++#define CONFIG_THP_DECODER 0 ++#define CONFIG_TIERTEXSEQVIDEO_DECODER 0 ++#define 
CONFIG_TIFF_DECODER 0 ++#define CONFIG_TMV_DECODER 0 ++#define CONFIG_TRUEMOTION1_DECODER 0 ++#define CONFIG_TRUEMOTION2_DECODER 0 ++#define CONFIG_TRUEMOTION2RT_DECODER 0 ++#define CONFIG_TSCC_DECODER 0 ++#define CONFIG_TSCC2_DECODER 0 ++#define CONFIG_TXD_DECODER 0 ++#define CONFIG_ULTI_DECODER 0 ++#define CONFIG_UTVIDEO_DECODER 0 ++#define CONFIG_V210_DECODER 0 ++#define CONFIG_V210X_DECODER 0 ++#define CONFIG_V308_DECODER 0 ++#define CONFIG_V408_DECODER 0 ++#define CONFIG_V410_DECODER 0 ++#define CONFIG_VB_DECODER 0 ++#define CONFIG_VBN_DECODER 0 ++#define CONFIG_VBLE_DECODER 0 ++#define CONFIG_VC1_DECODER 0 ++#define CONFIG_VC1_CRYSTALHD_DECODER 0 ++#define CONFIG_VC1IMAGE_DECODER 0 ++#define CONFIG_VC1_MMAL_DECODER 0 ++#define CONFIG_VC1_QSV_DECODER 0 ++#define CONFIG_VC1_V4L2M2M_DECODER 0 ++#define CONFIG_VCR1_DECODER 0 ++#define CONFIG_VMDVIDEO_DECODER 0 ++#define CONFIG_VMIX_DECODER 0 ++#define CONFIG_VMNC_DECODER 0 ++#define CONFIG_VP3_DECODER 1 ++#define CONFIG_VP4_DECODER 0 ++#define CONFIG_VP5_DECODER 0 ++#define CONFIG_VP6_DECODER 0 ++#define CONFIG_VP6A_DECODER 0 ++#define CONFIG_VP6F_DECODER 0 ++#define CONFIG_VP7_DECODER 0 ++#define CONFIG_VP8_DECODER 1 ++#define CONFIG_VP8_RKMPP_DECODER 0 ++#define CONFIG_VP8_V4L2M2M_DECODER 0 ++#define CONFIG_VP9_DECODER 0 ++#define CONFIG_VP9_RKMPP_DECODER 0 ++#define CONFIG_VP9_V4L2M2M_DECODER 0 ++#define CONFIG_VQA_DECODER 0 ++#define CONFIG_VQC_DECODER 0 ++#define CONFIG_WBMP_DECODER 0 ++#define CONFIG_WEBP_DECODER 0 ++#define CONFIG_WCMV_DECODER 0 ++#define CONFIG_WRAPPED_AVFRAME_DECODER 0 ++#define CONFIG_WMV1_DECODER 0 ++#define CONFIG_WMV2_DECODER 0 ++#define CONFIG_WMV3_DECODER 0 ++#define CONFIG_WMV3_CRYSTALHD_DECODER 0 ++#define CONFIG_WMV3IMAGE_DECODER 0 ++#define CONFIG_WNV1_DECODER 0 ++#define CONFIG_XAN_WC3_DECODER 0 ++#define CONFIG_XAN_WC4_DECODER 0 ++#define CONFIG_XBM_DECODER 0 ++#define CONFIG_XFACE_DECODER 0 ++#define CONFIG_XL_DECODER 0 ++#define CONFIG_XPM_DECODER 0 ++#define 
CONFIG_XWD_DECODER 0 ++#define CONFIG_Y41P_DECODER 0 ++#define CONFIG_YLC_DECODER 0 ++#define CONFIG_YOP_DECODER 0 ++#define CONFIG_YUV4_DECODER 0 ++#define CONFIG_ZERO12V_DECODER 0 ++#define CONFIG_ZEROCODEC_DECODER 0 ++#define CONFIG_ZLIB_DECODER 0 ++#define CONFIG_ZMBV_DECODER 0 ++#define CONFIG_AAC_DECODER 0 ++#define CONFIG_AAC_FIXED_DECODER 0 ++#define CONFIG_AAC_LATM_DECODER 0 ++#define CONFIG_AC3_DECODER 0 ++#define CONFIG_AC3_FIXED_DECODER 0 ++#define CONFIG_ACELP_KELVIN_DECODER 0 ++#define CONFIG_ALAC_DECODER 0 ++#define CONFIG_ALS_DECODER 0 ++#define CONFIG_AMRNB_DECODER 0 ++#define CONFIG_AMRWB_DECODER 0 ++#define CONFIG_APAC_DECODER 0 ++#define CONFIG_APE_DECODER 0 ++#define CONFIG_APTX_DECODER 0 ++#define CONFIG_APTX_HD_DECODER 0 ++#define CONFIG_ATRAC1_DECODER 0 ++#define CONFIG_ATRAC3_DECODER 0 ++#define CONFIG_ATRAC3AL_DECODER 0 ++#define CONFIG_ATRAC3P_DECODER 0 ++#define CONFIG_ATRAC3PAL_DECODER 0 ++#define CONFIG_ATRAC9_DECODER 0 ++#define CONFIG_BINKAUDIO_DCT_DECODER 0 ++#define CONFIG_BINKAUDIO_RDFT_DECODER 0 ++#define CONFIG_BMV_AUDIO_DECODER 0 ++#define CONFIG_BONK_DECODER 0 ++#define CONFIG_COOK_DECODER 0 ++#define CONFIG_DCA_DECODER 0 ++#define CONFIG_DFPWM_DECODER 0 ++#define CONFIG_DOLBY_E_DECODER 0 ++#define CONFIG_DSD_LSBF_DECODER 0 ++#define CONFIG_DSD_MSBF_DECODER 0 ++#define CONFIG_DSD_LSBF_PLANAR_DECODER 0 ++#define CONFIG_DSD_MSBF_PLANAR_DECODER 0 ++#define CONFIG_DSICINAUDIO_DECODER 0 ++#define CONFIG_DSS_SP_DECODER 0 ++#define CONFIG_DST_DECODER 0 ++#define CONFIG_EAC3_DECODER 0 ++#define CONFIG_EVRC_DECODER 0 ++#define CONFIG_FASTAUDIO_DECODER 0 ++#define CONFIG_FFWAVESYNTH_DECODER 0 ++#define CONFIG_FLAC_DECODER 1 ++#define CONFIG_FTR_DECODER 0 ++#define CONFIG_G723_1_DECODER 0 ++#define CONFIG_G729_DECODER 0 ++#define CONFIG_GSM_DECODER 0 ++#define CONFIG_GSM_MS_DECODER 0 ++#define CONFIG_HCA_DECODER 0 ++#define CONFIG_HCOM_DECODER 0 ++#define CONFIG_HDR_DECODER 0 ++#define CONFIG_IAC_DECODER 0 ++#define CONFIG_ILBC_DECODER 0 
++#define CONFIG_IMC_DECODER 0 ++#define CONFIG_INTERPLAY_ACM_DECODER 0 ++#define CONFIG_MACE3_DECODER 0 ++#define CONFIG_MACE6_DECODER 0 ++#define CONFIG_METASOUND_DECODER 0 ++#define CONFIG_MISC4_DECODER 0 ++#define CONFIG_MLP_DECODER 0 ++#define CONFIG_MP1_DECODER 0 ++#define CONFIG_MP1FLOAT_DECODER 0 ++#define CONFIG_MP2_DECODER 0 ++#define CONFIG_MP2FLOAT_DECODER 0 ++#define CONFIG_MP3FLOAT_DECODER 0 ++#define CONFIG_MP3_DECODER 1 ++#define CONFIG_MP3ADUFLOAT_DECODER 0 ++#define CONFIG_MP3ADU_DECODER 0 ++#define CONFIG_MP3ON4FLOAT_DECODER 0 ++#define CONFIG_MP3ON4_DECODER 0 ++#define CONFIG_MPC7_DECODER 0 ++#define CONFIG_MPC8_DECODER 0 ++#define CONFIG_MSNSIREN_DECODER 0 ++#define CONFIG_NELLYMOSER_DECODER 0 ++#define CONFIG_ON2AVC_DECODER 0 ++#define CONFIG_OPUS_DECODER 0 ++#define CONFIG_OSQ_DECODER 0 ++#define CONFIG_PAF_AUDIO_DECODER 0 ++#define CONFIG_QCELP_DECODER 0 ++#define CONFIG_QDM2_DECODER 0 ++#define CONFIG_QDMC_DECODER 0 ++#define CONFIG_RA_144_DECODER 0 ++#define CONFIG_RA_288_DECODER 0 ++#define CONFIG_RALF_DECODER 0 ++#define CONFIG_SBC_DECODER 0 ++#define CONFIG_SHORTEN_DECODER 0 ++#define CONFIG_SIPR_DECODER 0 ++#define CONFIG_SIREN_DECODER 0 ++#define CONFIG_SMACKAUD_DECODER 0 ++#define CONFIG_SONIC_DECODER 0 ++#define CONFIG_TAK_DECODER 0 ++#define CONFIG_TRUEHD_DECODER 0 ++#define CONFIG_TRUESPEECH_DECODER 0 ++#define CONFIG_TTA_DECODER 0 ++#define CONFIG_TWINVQ_DECODER 0 ++#define CONFIG_VMDAUDIO_DECODER 0 ++#define CONFIG_VORBIS_DECODER 1 ++#define CONFIG_WAVARC_DECODER 0 ++#define CONFIG_WAVPACK_DECODER 0 ++#define CONFIG_WMALOSSLESS_DECODER 0 ++#define CONFIG_WMAPRO_DECODER 0 ++#define CONFIG_WMAV1_DECODER 0 ++#define CONFIG_WMAV2_DECODER 0 ++#define CONFIG_WMAVOICE_DECODER 0 ++#define CONFIG_WS_SND1_DECODER 0 ++#define CONFIG_XMA1_DECODER 0 ++#define CONFIG_XMA2_DECODER 0 ++#define CONFIG_PCM_ALAW_DECODER 1 ++#define CONFIG_PCM_BLURAY_DECODER 0 ++#define CONFIG_PCM_DVD_DECODER 0 ++#define CONFIG_PCM_F16LE_DECODER 0 ++#define 
CONFIG_PCM_F24LE_DECODER 0 ++#define CONFIG_PCM_F32BE_DECODER 0 ++#define CONFIG_PCM_F32LE_DECODER 1 ++#define CONFIG_PCM_F64BE_DECODER 0 ++#define CONFIG_PCM_F64LE_DECODER 0 ++#define CONFIG_PCM_LXF_DECODER 0 ++#define CONFIG_PCM_MULAW_DECODER 1 ++#define CONFIG_PCM_S8_DECODER 0 ++#define CONFIG_PCM_S8_PLANAR_DECODER 0 ++#define CONFIG_PCM_S16BE_DECODER 1 ++#define CONFIG_PCM_S16BE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S16LE_DECODER 1 ++#define CONFIG_PCM_S16LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S24BE_DECODER 1 ++#define CONFIG_PCM_S24DAUD_DECODER 0 ++#define CONFIG_PCM_S24LE_DECODER 1 ++#define CONFIG_PCM_S24LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S32BE_DECODER 0 ++#define CONFIG_PCM_S32LE_DECODER 1 ++#define CONFIG_PCM_S32LE_PLANAR_DECODER 0 ++#define CONFIG_PCM_S64BE_DECODER 0 ++#define CONFIG_PCM_S64LE_DECODER 0 ++#define CONFIG_PCM_SGA_DECODER 0 ++#define CONFIG_PCM_U8_DECODER 1 ++#define CONFIG_PCM_U16BE_DECODER 0 ++#define CONFIG_PCM_U16LE_DECODER 0 ++#define CONFIG_PCM_U24BE_DECODER 0 ++#define CONFIG_PCM_U24LE_DECODER 0 ++#define CONFIG_PCM_U32BE_DECODER 0 ++#define CONFIG_PCM_U32LE_DECODER 0 ++#define CONFIG_PCM_VIDC_DECODER 0 ++#define CONFIG_CBD2_DPCM_DECODER 0 ++#define CONFIG_DERF_DPCM_DECODER 0 ++#define CONFIG_GREMLIN_DPCM_DECODER 0 ++#define CONFIG_INTERPLAY_DPCM_DECODER 0 ++#define CONFIG_ROQ_DPCM_DECODER 0 ++#define CONFIG_SDX2_DPCM_DECODER 0 ++#define CONFIG_SOL_DPCM_DECODER 0 ++#define CONFIG_XAN_DPCM_DECODER 0 ++#define CONFIG_WADY_DPCM_DECODER 0 ++#define CONFIG_ADPCM_4XM_DECODER 0 ++#define CONFIG_ADPCM_ADX_DECODER 0 ++#define CONFIG_ADPCM_AFC_DECODER 0 ++#define CONFIG_ADPCM_AGM_DECODER 0 ++#define CONFIG_ADPCM_AICA_DECODER 0 ++#define CONFIG_ADPCM_ARGO_DECODER 0 ++#define CONFIG_ADPCM_CT_DECODER 0 ++#define CONFIG_ADPCM_DTK_DECODER 0 ++#define CONFIG_ADPCM_EA_DECODER 0 ++#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0 ++#define CONFIG_ADPCM_EA_R1_DECODER 0 ++#define CONFIG_ADPCM_EA_R2_DECODER 0 ++#define CONFIG_ADPCM_EA_R3_DECODER 0 
++#define CONFIG_ADPCM_EA_XAS_DECODER 0 ++#define CONFIG_ADPCM_G722_DECODER 0 ++#define CONFIG_ADPCM_G726_DECODER 0 ++#define CONFIG_ADPCM_G726LE_DECODER 0 ++#define CONFIG_ADPCM_IMA_ACORN_DECODER 0 ++#define CONFIG_ADPCM_IMA_AMV_DECODER 0 ++#define CONFIG_ADPCM_IMA_ALP_DECODER 0 ++#define CONFIG_ADPCM_IMA_APC_DECODER 0 ++#define CONFIG_ADPCM_IMA_APM_DECODER 0 ++#define CONFIG_ADPCM_IMA_CUNNING_DECODER 0 ++#define CONFIG_ADPCM_IMA_DAT4_DECODER 0 ++#define CONFIG_ADPCM_IMA_DK3_DECODER 0 ++#define CONFIG_ADPCM_IMA_DK4_DECODER 0 ++#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0 ++#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0 ++#define CONFIG_ADPCM_IMA_ISS_DECODER 0 ++#define CONFIG_ADPCM_IMA_MOFLEX_DECODER 0 ++#define CONFIG_ADPCM_IMA_MTF_DECODER 0 ++#define CONFIG_ADPCM_IMA_OKI_DECODER 0 ++#define CONFIG_ADPCM_IMA_QT_DECODER 0 ++#define CONFIG_ADPCM_IMA_RAD_DECODER 0 ++#define CONFIG_ADPCM_IMA_SSI_DECODER 0 ++#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0 ++#define CONFIG_ADPCM_IMA_WAV_DECODER 0 ++#define CONFIG_ADPCM_IMA_WS_DECODER 0 ++#define CONFIG_ADPCM_MS_DECODER 0 ++#define CONFIG_ADPCM_MTAF_DECODER 0 ++#define CONFIG_ADPCM_PSX_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_2_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_3_DECODER 0 ++#define CONFIG_ADPCM_SBPRO_4_DECODER 0 ++#define CONFIG_ADPCM_SWF_DECODER 0 ++#define CONFIG_ADPCM_THP_DECODER 0 ++#define CONFIG_ADPCM_THP_LE_DECODER 0 ++#define CONFIG_ADPCM_VIMA_DECODER 0 ++#define CONFIG_ADPCM_XA_DECODER 0 ++#define CONFIG_ADPCM_XMD_DECODER 0 ++#define CONFIG_ADPCM_YAMAHA_DECODER 0 ++#define CONFIG_ADPCM_ZORK_DECODER 0 ++#define CONFIG_SSA_DECODER 0 ++#define CONFIG_ASS_DECODER 0 ++#define CONFIG_CCAPTION_DECODER 0 ++#define CONFIG_DVBSUB_DECODER 0 ++#define CONFIG_DVDSUB_DECODER 0 ++#define CONFIG_JACOSUB_DECODER 0 ++#define CONFIG_MICRODVD_DECODER 0 ++#define CONFIG_MOVTEXT_DECODER 0 ++#define CONFIG_MPL2_DECODER 0 ++#define CONFIG_PGSSUB_DECODER 0 ++#define CONFIG_PJS_DECODER 0 ++#define CONFIG_REALTEXT_DECODER 0 ++#define 
CONFIG_SAMI_DECODER 0 ++#define CONFIG_SRT_DECODER 0 ++#define CONFIG_STL_DECODER 0 ++#define CONFIG_SUBRIP_DECODER 0 ++#define CONFIG_SUBVIEWER_DECODER 0 ++#define CONFIG_SUBVIEWER1_DECODER 0 ++#define CONFIG_TEXT_DECODER 0 ++#define CONFIG_VPLAYER_DECODER 0 ++#define CONFIG_WEBVTT_DECODER 0 ++#define CONFIG_XSUB_DECODER 0 ++#define CONFIG_AAC_AT_DECODER 0 ++#define CONFIG_AC3_AT_DECODER 0 ++#define CONFIG_ADPCM_IMA_QT_AT_DECODER 0 ++#define CONFIG_ALAC_AT_DECODER 0 ++#define CONFIG_AMR_NB_AT_DECODER 0 ++#define CONFIG_EAC3_AT_DECODER 0 ++#define CONFIG_GSM_MS_AT_DECODER 0 ++#define CONFIG_ILBC_AT_DECODER 0 ++#define CONFIG_MP1_AT_DECODER 0 ++#define CONFIG_MP2_AT_DECODER 0 ++#define CONFIG_MP3_AT_DECODER 0 ++#define CONFIG_PCM_ALAW_AT_DECODER 0 ++#define CONFIG_PCM_MULAW_AT_DECODER 0 ++#define CONFIG_QDMC_AT_DECODER 0 ++#define CONFIG_QDM2_AT_DECODER 0 ++#define CONFIG_LIBARIBCAPTION_DECODER 0 ++#define CONFIG_LIBARIBB24_DECODER 0 ++#define CONFIG_LIBCELT_DECODER 0 ++#define CONFIG_LIBCODEC2_DECODER 0 ++#define CONFIG_LIBDAV1D_DECODER 0 ++#define CONFIG_LIBDAVS2_DECODER 0 ++#define CONFIG_LIBFDK_AAC_DECODER 0 ++#define CONFIG_LIBGSM_DECODER 0 ++#define CONFIG_LIBGSM_MS_DECODER 0 ++#define CONFIG_LIBILBC_DECODER 0 ++#define CONFIG_LIBJXL_DECODER 0 ++#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0 ++#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0 ++#define CONFIG_LIBOPUS_DECODER 1 ++#define CONFIG_LIBRSVG_DECODER 0 ++#define CONFIG_LIBSPEEX_DECODER 0 ++#define CONFIG_LIBUAVS3D_DECODER 0 ++#define CONFIG_LIBVORBIS_DECODER 0 ++#define CONFIG_LIBVPX_VP8_DECODER 0 ++#define CONFIG_LIBVPX_VP9_DECODER 0 ++#define CONFIG_LIBZVBI_TELETEXT_DECODER 0 ++#define CONFIG_BINTEXT_DECODER 0 ++#define CONFIG_XBIN_DECODER 0 ++#define CONFIG_IDF_DECODER 0 ++#define CONFIG_LIBAOM_AV1_DECODER 0 ++#define CONFIG_AV1_DECODER 0 ++#define CONFIG_AV1_CUVID_DECODER 0 ++#define CONFIG_AV1_MEDIACODEC_DECODER 0 ++#define CONFIG_AV1_QSV_DECODER 0 ++#define CONFIG_LIBOPENH264_DECODER 0 ++#define 
CONFIG_H264_CUVID_DECODER 0 ++#define CONFIG_HEVC_CUVID_DECODER 0 ++#define CONFIG_HEVC_MEDIACODEC_DECODER 0 ++#define CONFIG_MJPEG_CUVID_DECODER 0 ++#define CONFIG_MJPEG_QSV_DECODER 0 ++#define CONFIG_MPEG1_CUVID_DECODER 0 ++#define CONFIG_MPEG2_CUVID_DECODER 0 ++#define CONFIG_MPEG4_CUVID_DECODER 0 ++#define CONFIG_MPEG4_MEDIACODEC_DECODER 0 ++#define CONFIG_VC1_CUVID_DECODER 0 ++#define CONFIG_VP8_CUVID_DECODER 0 ++#define CONFIG_VP8_MEDIACODEC_DECODER 0 ++#define CONFIG_VP8_QSV_DECODER 0 ++#define CONFIG_VP9_CUVID_DECODER 0 ++#define CONFIG_VP9_MEDIACODEC_DECODER 0 ++#define CONFIG_VP9_QSV_DECODER 0 ++#define CONFIG_VNULL_DECODER 0 ++#define CONFIG_ANULL_DECODER 0 ++#define CONFIG_A64MULTI_ENCODER 0 ++#define CONFIG_A64MULTI5_ENCODER 0 ++#define CONFIG_ALIAS_PIX_ENCODER 0 ++#define CONFIG_AMV_ENCODER 0 ++#define CONFIG_APNG_ENCODER 0 ++#define CONFIG_ASV1_ENCODER 0 ++#define CONFIG_ASV2_ENCODER 0 ++#define CONFIG_AVRP_ENCODER 0 ++#define CONFIG_AVUI_ENCODER 0 ++#define CONFIG_AYUV_ENCODER 0 ++#define CONFIG_BITPACKED_ENCODER 0 ++#define CONFIG_BMP_ENCODER 0 ++#define CONFIG_CFHD_ENCODER 0 ++#define CONFIG_CINEPAK_ENCODER 0 ++#define CONFIG_CLJR_ENCODER 0 ++#define CONFIG_COMFORTNOISE_ENCODER 0 ++#define CONFIG_DNXHD_ENCODER 0 ++#define CONFIG_DPX_ENCODER 0 ++#define CONFIG_DVVIDEO_ENCODER 0 ++#define CONFIG_EXR_ENCODER 0 ++#define CONFIG_FFV1_ENCODER 0 ++#define CONFIG_FFVHUFF_ENCODER 0 ++#define CONFIG_FITS_ENCODER 0 ++#define CONFIG_FLASHSV_ENCODER 0 ++#define CONFIG_FLASHSV2_ENCODER 0 ++#define CONFIG_FLV_ENCODER 0 ++#define CONFIG_GIF_ENCODER 0 ++#define CONFIG_H261_ENCODER 0 ++#define CONFIG_H263_ENCODER 0 ++#define CONFIG_H263P_ENCODER 0 ++#define CONFIG_H264_MEDIACODEC_ENCODER 0 ++#define CONFIG_HAP_ENCODER 0 ++#define CONFIG_HUFFYUV_ENCODER 0 ++#define CONFIG_JPEG2000_ENCODER 0 ++#define CONFIG_JPEGLS_ENCODER 0 ++#define CONFIG_LJPEG_ENCODER 0 ++#define CONFIG_MAGICYUV_ENCODER 0 ++#define CONFIG_MJPEG_ENCODER 0 ++#define CONFIG_MPEG1VIDEO_ENCODER 0 
++#define CONFIG_MPEG2VIDEO_ENCODER 0 ++#define CONFIG_MPEG4_ENCODER 0 ++#define CONFIG_MSMPEG4V2_ENCODER 0 ++#define CONFIG_MSMPEG4V3_ENCODER 0 ++#define CONFIG_MSRLE_ENCODER 0 ++#define CONFIG_MSVIDEO1_ENCODER 0 ++#define CONFIG_PAM_ENCODER 0 ++#define CONFIG_PBM_ENCODER 0 ++#define CONFIG_PCX_ENCODER 0 ++#define CONFIG_PFM_ENCODER 0 ++#define CONFIG_PGM_ENCODER 0 ++#define CONFIG_PGMYUV_ENCODER 0 ++#define CONFIG_PHM_ENCODER 0 ++#define CONFIG_PNG_ENCODER 0 ++#define CONFIG_PPM_ENCODER 0 ++#define CONFIG_PRORES_ENCODER 0 ++#define CONFIG_PRORES_AW_ENCODER 0 ++#define CONFIG_PRORES_KS_ENCODER 0 ++#define CONFIG_QOI_ENCODER 0 ++#define CONFIG_QTRLE_ENCODER 0 ++#define CONFIG_R10K_ENCODER 0 ++#define CONFIG_R210_ENCODER 0 ++#define CONFIG_RAWVIDEO_ENCODER 0 ++#define CONFIG_ROQ_ENCODER 0 ++#define CONFIG_RPZA_ENCODER 0 ++#define CONFIG_RV10_ENCODER 0 ++#define CONFIG_RV20_ENCODER 0 ++#define CONFIG_S302M_ENCODER 0 ++#define CONFIG_SGI_ENCODER 0 ++#define CONFIG_SMC_ENCODER 0 ++#define CONFIG_SNOW_ENCODER 0 ++#define CONFIG_SPEEDHQ_ENCODER 0 ++#define CONFIG_SUNRAST_ENCODER 0 ++#define CONFIG_SVQ1_ENCODER 0 ++#define CONFIG_TARGA_ENCODER 0 ++#define CONFIG_TIFF_ENCODER 0 ++#define CONFIG_UTVIDEO_ENCODER 0 ++#define CONFIG_V210_ENCODER 0 ++#define CONFIG_V308_ENCODER 0 ++#define CONFIG_V408_ENCODER 0 ++#define CONFIG_V410_ENCODER 0 ++#define CONFIG_VBN_ENCODER 0 ++#define CONFIG_VC2_ENCODER 0 ++#define CONFIG_WBMP_ENCODER 0 ++#define CONFIG_WRAPPED_AVFRAME_ENCODER 0 ++#define CONFIG_WMV1_ENCODER 0 ++#define CONFIG_WMV2_ENCODER 0 ++#define CONFIG_XBM_ENCODER 0 ++#define CONFIG_XFACE_ENCODER 0 ++#define CONFIG_XWD_ENCODER 0 ++#define CONFIG_Y41P_ENCODER 0 ++#define CONFIG_YUV4_ENCODER 0 ++#define CONFIG_ZLIB_ENCODER 0 ++#define CONFIG_ZMBV_ENCODER 0 ++#define CONFIG_AAC_ENCODER 0 ++#define CONFIG_AC3_ENCODER 0 ++#define CONFIG_AC3_FIXED_ENCODER 0 ++#define CONFIG_ALAC_ENCODER 0 ++#define CONFIG_APTX_ENCODER 0 ++#define CONFIG_APTX_HD_ENCODER 0 ++#define 
CONFIG_DCA_ENCODER 0 ++#define CONFIG_DFPWM_ENCODER 0 ++#define CONFIG_EAC3_ENCODER 0 ++#define CONFIG_FLAC_ENCODER 0 ++#define CONFIG_G723_1_ENCODER 0 ++#define CONFIG_HDR_ENCODER 0 ++#define CONFIG_MLP_ENCODER 0 ++#define CONFIG_MP2_ENCODER 0 ++#define CONFIG_MP2FIXED_ENCODER 0 ++#define CONFIG_NELLYMOSER_ENCODER 0 ++#define CONFIG_OPUS_ENCODER 0 ++#define CONFIG_RA_144_ENCODER 0 ++#define CONFIG_SBC_ENCODER 0 ++#define CONFIG_SONIC_ENCODER 0 ++#define CONFIG_SONIC_LS_ENCODER 0 ++#define CONFIG_TRUEHD_ENCODER 0 ++#define CONFIG_TTA_ENCODER 0 ++#define CONFIG_VORBIS_ENCODER 0 ++#define CONFIG_WAVPACK_ENCODER 0 ++#define CONFIG_WMAV1_ENCODER 0 ++#define CONFIG_WMAV2_ENCODER 0 ++#define CONFIG_PCM_ALAW_ENCODER 0 ++#define CONFIG_PCM_BLURAY_ENCODER 0 ++#define CONFIG_PCM_DVD_ENCODER 0 ++#define CONFIG_PCM_F32BE_ENCODER 0 ++#define CONFIG_PCM_F32LE_ENCODER 0 ++#define CONFIG_PCM_F64BE_ENCODER 0 ++#define CONFIG_PCM_F64LE_ENCODER 0 ++#define CONFIG_PCM_MULAW_ENCODER 0 ++#define CONFIG_PCM_S8_ENCODER 0 ++#define CONFIG_PCM_S8_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S16BE_ENCODER 0 ++#define CONFIG_PCM_S16BE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S16LE_ENCODER 0 ++#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S24BE_ENCODER 0 ++#define CONFIG_PCM_S24DAUD_ENCODER 0 ++#define CONFIG_PCM_S24LE_ENCODER 0 ++#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S32BE_ENCODER 0 ++#define CONFIG_PCM_S32LE_ENCODER 0 ++#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0 ++#define CONFIG_PCM_S64BE_ENCODER 0 ++#define CONFIG_PCM_S64LE_ENCODER 0 ++#define CONFIG_PCM_U8_ENCODER 0 ++#define CONFIG_PCM_U16BE_ENCODER 0 ++#define CONFIG_PCM_U16LE_ENCODER 0 ++#define CONFIG_PCM_U24BE_ENCODER 0 ++#define CONFIG_PCM_U24LE_ENCODER 0 ++#define CONFIG_PCM_U32BE_ENCODER 0 ++#define CONFIG_PCM_U32LE_ENCODER 0 ++#define CONFIG_PCM_VIDC_ENCODER 0 ++#define CONFIG_ROQ_DPCM_ENCODER 0 ++#define CONFIG_ADPCM_ADX_ENCODER 0 ++#define CONFIG_ADPCM_ARGO_ENCODER 0 ++#define 
CONFIG_ADPCM_G722_ENCODER 0 ++#define CONFIG_ADPCM_G726_ENCODER 0 ++#define CONFIG_ADPCM_G726LE_ENCODER 0 ++#define CONFIG_ADPCM_IMA_AMV_ENCODER 0 ++#define CONFIG_ADPCM_IMA_ALP_ENCODER 0 ++#define CONFIG_ADPCM_IMA_APM_ENCODER 0 ++#define CONFIG_ADPCM_IMA_QT_ENCODER 0 ++#define CONFIG_ADPCM_IMA_SSI_ENCODER 0 ++#define CONFIG_ADPCM_IMA_WAV_ENCODER 0 ++#define CONFIG_ADPCM_IMA_WS_ENCODER 0 ++#define CONFIG_ADPCM_MS_ENCODER 0 ++#define CONFIG_ADPCM_SWF_ENCODER 0 ++#define CONFIG_ADPCM_YAMAHA_ENCODER 0 ++#define CONFIG_SSA_ENCODER 0 ++#define CONFIG_ASS_ENCODER 0 ++#define CONFIG_DVBSUB_ENCODER 0 ++#define CONFIG_DVDSUB_ENCODER 0 ++#define CONFIG_MOVTEXT_ENCODER 0 ++#define CONFIG_SRT_ENCODER 0 ++#define CONFIG_SUBRIP_ENCODER 0 ++#define CONFIG_TEXT_ENCODER 0 ++#define CONFIG_TTML_ENCODER 0 ++#define CONFIG_WEBVTT_ENCODER 0 ++#define CONFIG_XSUB_ENCODER 0 ++#define CONFIG_AAC_AT_ENCODER 0 ++#define CONFIG_ALAC_AT_ENCODER 0 ++#define CONFIG_ILBC_AT_ENCODER 0 ++#define CONFIG_PCM_ALAW_AT_ENCODER 0 ++#define CONFIG_PCM_MULAW_AT_ENCODER 0 ++#define CONFIG_LIBAOM_AV1_ENCODER 0 ++#define CONFIG_LIBCODEC2_ENCODER 0 ++#define CONFIG_LIBFDK_AAC_ENCODER 0 ++#define CONFIG_LIBGSM_ENCODER 0 ++#define CONFIG_LIBGSM_MS_ENCODER 0 ++#define CONFIG_LIBILBC_ENCODER 0 ++#define CONFIG_LIBJXL_ENCODER 0 ++#define CONFIG_LIBMP3LAME_ENCODER 0 ++#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0 ++#define CONFIG_LIBOPENJPEG_ENCODER 0 ++#define CONFIG_LIBOPUS_ENCODER 0 ++#define CONFIG_LIBRAV1E_ENCODER 0 ++#define CONFIG_LIBSHINE_ENCODER 0 ++#define CONFIG_LIBSPEEX_ENCODER 0 ++#define CONFIG_LIBSVTAV1_ENCODER 0 ++#define CONFIG_LIBTHEORA_ENCODER 0 ++#define CONFIG_LIBTWOLAME_ENCODER 0 ++#define CONFIG_LIBVO_AMRWBENC_ENCODER 0 ++#define CONFIG_LIBVORBIS_ENCODER 0 ++#define CONFIG_LIBVPX_VP8_ENCODER 0 ++#define CONFIG_LIBVPX_VP9_ENCODER 0 ++#define CONFIG_LIBWEBP_ANIM_ENCODER 0 ++#define CONFIG_LIBWEBP_ENCODER 0 ++#define CONFIG_LIBX262_ENCODER 0 ++#define CONFIG_LIBX264_ENCODER 0 ++#define 
CONFIG_LIBX264RGB_ENCODER 0 ++#define CONFIG_LIBX265_ENCODER 0 ++#define CONFIG_LIBXAVS_ENCODER 0 ++#define CONFIG_LIBXAVS2_ENCODER 0 ++#define CONFIG_LIBXVID_ENCODER 0 ++#define CONFIG_AAC_MF_ENCODER 0 ++#define CONFIG_AC3_MF_ENCODER 0 ++#define CONFIG_H263_V4L2M2M_ENCODER 0 ++#define CONFIG_AV1_MEDIACODEC_ENCODER 0 ++#define CONFIG_AV1_NVENC_ENCODER 0 ++#define CONFIG_AV1_QSV_ENCODER 0 ++#define CONFIG_AV1_AMF_ENCODER 0 ++#define CONFIG_AV1_VAAPI_ENCODER 0 ++#define CONFIG_LIBOPENH264_ENCODER 0 ++#define CONFIG_H264_AMF_ENCODER 0 ++#define CONFIG_H264_MF_ENCODER 0 ++#define CONFIG_H264_NVENC_ENCODER 0 ++#define CONFIG_H264_OMX_ENCODER 0 ++#define CONFIG_H264_QSV_ENCODER 0 ++#define CONFIG_H264_V4L2M2M_ENCODER 0 ++#define CONFIG_H264_VAAPI_ENCODER 0 ++#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_HEVC_AMF_ENCODER 0 ++#define CONFIG_HEVC_MEDIACODEC_ENCODER 0 ++#define CONFIG_HEVC_MF_ENCODER 0 ++#define CONFIG_HEVC_NVENC_ENCODER 0 ++#define CONFIG_HEVC_QSV_ENCODER 0 ++#define CONFIG_HEVC_V4L2M2M_ENCODER 0 ++#define CONFIG_HEVC_VAAPI_ENCODER 0 ++#define CONFIG_HEVC_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_LIBKVAZAAR_ENCODER 0 ++#define CONFIG_MJPEG_QSV_ENCODER 0 ++#define CONFIG_MJPEG_VAAPI_ENCODER 0 ++#define CONFIG_MP3_MF_ENCODER 0 ++#define CONFIG_MPEG2_QSV_ENCODER 0 ++#define CONFIG_MPEG2_VAAPI_ENCODER 0 ++#define CONFIG_MPEG4_MEDIACODEC_ENCODER 0 ++#define CONFIG_MPEG4_OMX_ENCODER 0 ++#define CONFIG_MPEG4_V4L2M2M_ENCODER 0 ++#define CONFIG_PRORES_VIDEOTOOLBOX_ENCODER 0 ++#define CONFIG_VP8_MEDIACODEC_ENCODER 0 ++#define CONFIG_VP8_V4L2M2M_ENCODER 0 ++#define CONFIG_VP8_VAAPI_ENCODER 0 ++#define CONFIG_VP9_MEDIACODEC_ENCODER 0 ++#define CONFIG_VP9_VAAPI_ENCODER 0 ++#define CONFIG_VP9_QSV_ENCODER 0 ++#define CONFIG_VNULL_ENCODER 0 ++#define CONFIG_ANULL_ENCODER 0 ++#define CONFIG_AV1_D3D11VA_HWACCEL 0 ++#define CONFIG_AV1_D3D11VA2_HWACCEL 0 ++#define CONFIG_AV1_DXVA2_HWACCEL 0 ++#define CONFIG_AV1_NVDEC_HWACCEL 0 ++#define CONFIG_AV1_VAAPI_HWACCEL 0 
++#define CONFIG_AV1_VDPAU_HWACCEL 0 ++#define CONFIG_AV1_VULKAN_HWACCEL 0 ++#define CONFIG_H263_VAAPI_HWACCEL 0 ++#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_H264_D3D11VA_HWACCEL 0 ++#define CONFIG_H264_D3D11VA2_HWACCEL 0 ++#define CONFIG_H264_DXVA2_HWACCEL 0 ++#define CONFIG_H264_NVDEC_HWACCEL 0 ++#define CONFIG_H264_VAAPI_HWACCEL 0 ++#define CONFIG_H264_VDPAU_HWACCEL 0 ++#define CONFIG_H264_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_H264_VULKAN_HWACCEL 0 ++#define CONFIG_HEVC_D3D11VA_HWACCEL 0 ++#define CONFIG_HEVC_D3D11VA2_HWACCEL 0 ++#define CONFIG_HEVC_DXVA2_HWACCEL 0 ++#define CONFIG_HEVC_NVDEC_HWACCEL 0 ++#define CONFIG_HEVC_VAAPI_HWACCEL 0 ++#define CONFIG_HEVC_VDPAU_HWACCEL 0 ++#define CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_HEVC_VULKAN_HWACCEL 0 ++#define CONFIG_MJPEG_NVDEC_HWACCEL 0 ++#define CONFIG_MJPEG_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG1_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG1_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MPEG2_D3D11VA_HWACCEL 0 ++#define CONFIG_MPEG2_D3D11VA2_HWACCEL 0 ++#define CONFIG_MPEG2_DXVA2_HWACCEL 0 ++#define CONFIG_MPEG2_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG2_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG2_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_MPEG4_NVDEC_HWACCEL 0 ++#define CONFIG_MPEG4_VAAPI_HWACCEL 0 ++#define CONFIG_MPEG4_VDPAU_HWACCEL 0 ++#define CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_PRORES_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_VC1_D3D11VA_HWACCEL 0 ++#define CONFIG_VC1_D3D11VA2_HWACCEL 0 ++#define CONFIG_VC1_DXVA2_HWACCEL 0 ++#define CONFIG_VC1_NVDEC_HWACCEL 0 ++#define CONFIG_VC1_VAAPI_HWACCEL 0 ++#define CONFIG_VC1_VDPAU_HWACCEL 0 ++#define CONFIG_VP8_NVDEC_HWACCEL 0 ++#define CONFIG_VP8_VAAPI_HWACCEL 0 ++#define CONFIG_VP9_D3D11VA_HWACCEL 0 ++#define CONFIG_VP9_D3D11VA2_HWACCEL 0 ++#define CONFIG_VP9_DXVA2_HWACCEL 0 ++#define CONFIG_VP9_NVDEC_HWACCEL 0 ++#define CONFIG_VP9_VAAPI_HWACCEL 0 ++#define 
CONFIG_VP9_VDPAU_HWACCEL 0 ++#define CONFIG_VP9_VIDEOTOOLBOX_HWACCEL 0 ++#define CONFIG_WMV3_D3D11VA_HWACCEL 0 ++#define CONFIG_WMV3_D3D11VA2_HWACCEL 0 ++#define CONFIG_WMV3_DXVA2_HWACCEL 0 ++#define CONFIG_WMV3_NVDEC_HWACCEL 0 ++#define CONFIG_WMV3_VAAPI_HWACCEL 0 ++#define CONFIG_WMV3_VDPAU_HWACCEL 0 ++#define CONFIG_AAC_PARSER 0 ++#define CONFIG_AAC_LATM_PARSER 0 ++#define CONFIG_AC3_PARSER 0 ++#define CONFIG_ADX_PARSER 0 ++#define CONFIG_AMR_PARSER 0 ++#define CONFIG_AV1_PARSER 0 ++#define CONFIG_AVS2_PARSER 0 ++#define CONFIG_AVS3_PARSER 0 ++#define CONFIG_BMP_PARSER 0 ++#define CONFIG_CAVSVIDEO_PARSER 0 ++#define CONFIG_COOK_PARSER 0 ++#define CONFIG_CRI_PARSER 0 ++#define CONFIG_DCA_PARSER 0 ++#define CONFIG_DIRAC_PARSER 0 ++#define CONFIG_DNXHD_PARSER 0 ++#define CONFIG_DOLBY_E_PARSER 0 ++#define CONFIG_DPX_PARSER 0 ++#define CONFIG_DVAUDIO_PARSER 0 ++#define CONFIG_DVBSUB_PARSER 0 ++#define CONFIG_DVDSUB_PARSER 0 ++#define CONFIG_DVD_NAV_PARSER 0 ++#define CONFIG_EVC_PARSER 0 ++#define CONFIG_FLAC_PARSER 1 ++#define CONFIG_FTR_PARSER 0 ++#define CONFIG_G723_1_PARSER 0 ++#define CONFIG_G729_PARSER 0 ++#define CONFIG_GIF_PARSER 0 ++#define CONFIG_GSM_PARSER 0 ++#define CONFIG_H261_PARSER 0 ++#define CONFIG_H263_PARSER 0 ++#define CONFIG_H264_PARSER 0 ++#define CONFIG_HEVC_PARSER 0 ++#define CONFIG_HDR_PARSER 0 ++#define CONFIG_IPU_PARSER 0 ++#define CONFIG_JPEG2000_PARSER 0 ++#define CONFIG_JPEGXL_PARSER 0 ++#define CONFIG_MISC4_PARSER 0 ++#define CONFIG_MJPEG_PARSER 0 ++#define CONFIG_MLP_PARSER 0 ++#define CONFIG_MPEG4VIDEO_PARSER 0 ++#define CONFIG_MPEGAUDIO_PARSER 1 ++#define CONFIG_MPEGVIDEO_PARSER 0 ++#define CONFIG_OPUS_PARSER 1 ++#define CONFIG_PNG_PARSER 0 ++#define CONFIG_PNM_PARSER 0 ++#define CONFIG_QOI_PARSER 0 ++#define CONFIG_RV34_PARSER 0 ++#define CONFIG_SBC_PARSER 0 ++#define CONFIG_SIPR_PARSER 0 ++#define CONFIG_TAK_PARSER 0 ++#define CONFIG_VC1_PARSER 0 ++#define CONFIG_VORBIS_PARSER 1 ++#define CONFIG_VP3_PARSER 1 ++#define 
CONFIG_VP8_PARSER 1 ++#define CONFIG_VP9_PARSER 1 ++#define CONFIG_VVC_PARSER 0 ++#define CONFIG_WEBP_PARSER 0 ++#define CONFIG_XBM_PARSER 0 ++#define CONFIG_XMA_PARSER 0 ++#define CONFIG_XWD_PARSER 0 ++#define CONFIG_ALSA_INDEV 0 ++#define CONFIG_ANDROID_CAMERA_INDEV 0 ++#define CONFIG_AVFOUNDATION_INDEV 0 ++#define CONFIG_BKTR_INDEV 0 ++#define CONFIG_DECKLINK_INDEV 0 ++#define CONFIG_DSHOW_INDEV 0 ++#define CONFIG_FBDEV_INDEV 0 ++#define CONFIG_GDIGRAB_INDEV 0 ++#define CONFIG_IEC61883_INDEV 0 ++#define CONFIG_JACK_INDEV 0 ++#define CONFIG_KMSGRAB_INDEV 0 ++#define CONFIG_LAVFI_INDEV 0 ++#define CONFIG_OPENAL_INDEV 0 ++#define CONFIG_OSS_INDEV 0 ++#define CONFIG_PULSE_INDEV 0 ++#define CONFIG_SNDIO_INDEV 0 ++#define CONFIG_V4L2_INDEV 0 ++#define CONFIG_VFWCAP_INDEV 0 ++#define CONFIG_XCBGRAB_INDEV 0 ++#define CONFIG_LIBCDIO_INDEV 0 ++#define CONFIG_LIBDC1394_INDEV 0 ++#define CONFIG_ALSA_OUTDEV 0 ++#define CONFIG_AUDIOTOOLBOX_OUTDEV 0 ++#define CONFIG_CACA_OUTDEV 0 ++#define CONFIG_DECKLINK_OUTDEV 0 ++#define CONFIG_FBDEV_OUTDEV 0 ++#define CONFIG_OPENGL_OUTDEV 0 ++#define CONFIG_OSS_OUTDEV 0 ++#define CONFIG_PULSE_OUTDEV 0 ++#define CONFIG_SDL2_OUTDEV 0 ++#define CONFIG_SNDIO_OUTDEV 0 ++#define CONFIG_V4L2_OUTDEV 0 ++#define CONFIG_XV_OUTDEV 0 ++#define CONFIG_ABENCH_FILTER 0 ++#define CONFIG_ACOMPRESSOR_FILTER 0 ++#define CONFIG_ACONTRAST_FILTER 0 ++#define CONFIG_ACOPY_FILTER 0 ++#define CONFIG_ACUE_FILTER 0 ++#define CONFIG_ACROSSFADE_FILTER 0 ++#define CONFIG_ACROSSOVER_FILTER 0 ++#define CONFIG_ACRUSHER_FILTER 0 ++#define CONFIG_ADECLICK_FILTER 0 ++#define CONFIG_ADECLIP_FILTER 0 ++#define CONFIG_ADECORRELATE_FILTER 0 ++#define CONFIG_ADELAY_FILTER 0 ++#define CONFIG_ADENORM_FILTER 0 ++#define CONFIG_ADERIVATIVE_FILTER 0 ++#define CONFIG_ADRC_FILTER 0 ++#define CONFIG_ADYNAMICEQUALIZER_FILTER 0 ++#define CONFIG_ADYNAMICSMOOTH_FILTER 0 ++#define CONFIG_AECHO_FILTER 0 ++#define CONFIG_AEMPHASIS_FILTER 0 ++#define CONFIG_AEVAL_FILTER 0 ++#define 
CONFIG_AEXCITER_FILTER 0 ++#define CONFIG_AFADE_FILTER 0 ++#define CONFIG_AFFTDN_FILTER 0 ++#define CONFIG_AFFTFILT_FILTER 0 ++#define CONFIG_AFIR_FILTER 0 ++#define CONFIG_AFORMAT_FILTER 0 ++#define CONFIG_AFREQSHIFT_FILTER 0 ++#define CONFIG_AFWTDN_FILTER 0 ++#define CONFIG_AGATE_FILTER 0 ++#define CONFIG_AIIR_FILTER 0 ++#define CONFIG_AINTEGRAL_FILTER 0 ++#define CONFIG_AINTERLEAVE_FILTER 0 ++#define CONFIG_ALATENCY_FILTER 0 ++#define CONFIG_ALIMITER_FILTER 0 ++#define CONFIG_ALLPASS_FILTER 0 ++#define CONFIG_ALOOP_FILTER 0 ++#define CONFIG_AMERGE_FILTER 0 ++#define CONFIG_AMETADATA_FILTER 0 ++#define CONFIG_AMIX_FILTER 0 ++#define CONFIG_AMULTIPLY_FILTER 0 ++#define CONFIG_ANEQUALIZER_FILTER 0 ++#define CONFIG_ANLMDN_FILTER 0 ++#define CONFIG_ANLMF_FILTER 0 ++#define CONFIG_ANLMS_FILTER 0 ++#define CONFIG_ANULL_FILTER 0 ++#define CONFIG_APAD_FILTER 0 ++#define CONFIG_APERMS_FILTER 0 ++#define CONFIG_APHASER_FILTER 0 ++#define CONFIG_APHASESHIFT_FILTER 0 ++#define CONFIG_APSNR_FILTER 0 ++#define CONFIG_APSYCLIP_FILTER 0 ++#define CONFIG_APULSATOR_FILTER 0 ++#define CONFIG_AREALTIME_FILTER 0 ++#define CONFIG_ARESAMPLE_FILTER 0 ++#define CONFIG_AREVERSE_FILTER 0 ++#define CONFIG_ARLS_FILTER 0 ++#define CONFIG_ARNNDN_FILTER 0 ++#define CONFIG_ASDR_FILTER 0 ++#define CONFIG_ASEGMENT_FILTER 0 ++#define CONFIG_ASELECT_FILTER 0 ++#define CONFIG_ASENDCMD_FILTER 0 ++#define CONFIG_ASETNSAMPLES_FILTER 0 ++#define CONFIG_ASETPTS_FILTER 0 ++#define CONFIG_ASETRATE_FILTER 0 ++#define CONFIG_ASETTB_FILTER 0 ++#define CONFIG_ASHOWINFO_FILTER 0 ++#define CONFIG_ASIDEDATA_FILTER 0 ++#define CONFIG_ASISDR_FILTER 0 ++#define CONFIG_ASOFTCLIP_FILTER 0 ++#define CONFIG_ASPECTRALSTATS_FILTER 0 ++#define CONFIG_ASPLIT_FILTER 0 ++#define CONFIG_ASR_FILTER 0 ++#define CONFIG_ASTATS_FILTER 0 ++#define CONFIG_ASTREAMSELECT_FILTER 0 ++#define CONFIG_ASUBBOOST_FILTER 0 ++#define CONFIG_ASUBCUT_FILTER 0 ++#define CONFIG_ASUPERCUT_FILTER 0 ++#define CONFIG_ASUPERPASS_FILTER 0 ++#define 
CONFIG_ASUPERSTOP_FILTER 0 ++#define CONFIG_ATEMPO_FILTER 0 ++#define CONFIG_ATILT_FILTER 0 ++#define CONFIG_ATRIM_FILTER 0 ++#define CONFIG_AXCORRELATE_FILTER 0 ++#define CONFIG_AZMQ_FILTER 0 ++#define CONFIG_BANDPASS_FILTER 0 ++#define CONFIG_BANDREJECT_FILTER 0 ++#define CONFIG_BASS_FILTER 0 ++#define CONFIG_BIQUAD_FILTER 0 ++#define CONFIG_BS2B_FILTER 0 ++#define CONFIG_CHANNELMAP_FILTER 0 ++#define CONFIG_CHANNELSPLIT_FILTER 0 ++#define CONFIG_CHORUS_FILTER 0 ++#define CONFIG_COMPAND_FILTER 0 ++#define CONFIG_COMPENSATIONDELAY_FILTER 0 ++#define CONFIG_CROSSFEED_FILTER 0 ++#define CONFIG_CRYSTALIZER_FILTER 0 ++#define CONFIG_DCSHIFT_FILTER 0 ++#define CONFIG_DEESSER_FILTER 0 ++#define CONFIG_DIALOGUENHANCE_FILTER 0 ++#define CONFIG_DRMETER_FILTER 0 ++#define CONFIG_DYNAUDNORM_FILTER 0 ++#define CONFIG_EARWAX_FILTER 0 ++#define CONFIG_EBUR128_FILTER 0 ++#define CONFIG_EQUALIZER_FILTER 0 ++#define CONFIG_EXTRASTEREO_FILTER 0 ++#define CONFIG_FIREQUALIZER_FILTER 0 ++#define CONFIG_FLANGER_FILTER 0 ++#define CONFIG_HAAS_FILTER 0 ++#define CONFIG_HDCD_FILTER 0 ++#define CONFIG_HEADPHONE_FILTER 0 ++#define CONFIG_HIGHPASS_FILTER 0 ++#define CONFIG_HIGHSHELF_FILTER 0 ++#define CONFIG_JOIN_FILTER 0 ++#define CONFIG_LADSPA_FILTER 0 ++#define CONFIG_LOUDNORM_FILTER 0 ++#define CONFIG_LOWPASS_FILTER 0 ++#define CONFIG_LOWSHELF_FILTER 0 ++#define CONFIG_LV2_FILTER 0 ++#define CONFIG_MCOMPAND_FILTER 0 ++#define CONFIG_PAN_FILTER 0 ++#define CONFIG_REPLAYGAIN_FILTER 0 ++#define CONFIG_RUBBERBAND_FILTER 0 ++#define CONFIG_SIDECHAINCOMPRESS_FILTER 0 ++#define CONFIG_SIDECHAINGATE_FILTER 0 ++#define CONFIG_SILENCEDETECT_FILTER 0 ++#define CONFIG_SILENCEREMOVE_FILTER 0 ++#define CONFIG_SOFALIZER_FILTER 0 ++#define CONFIG_SPEECHNORM_FILTER 0 ++#define CONFIG_STEREOTOOLS_FILTER 0 ++#define CONFIG_STEREOWIDEN_FILTER 0 ++#define CONFIG_SUPEREQUALIZER_FILTER 0 ++#define CONFIG_SURROUND_FILTER 0 ++#define CONFIG_TILTSHELF_FILTER 0 ++#define CONFIG_TREBLE_FILTER 0 ++#define 
CONFIG_TREMOLO_FILTER 0 ++#define CONFIG_VIBRATO_FILTER 0 ++#define CONFIG_VIRTUALBASS_FILTER 0 ++#define CONFIG_VOLUME_FILTER 0 ++#define CONFIG_VOLUMEDETECT_FILTER 0 ++#define CONFIG_AEVALSRC_FILTER 0 ++#define CONFIG_AFDELAYSRC_FILTER 0 ++#define CONFIG_AFIREQSRC_FILTER 0 ++#define CONFIG_AFIRSRC_FILTER 0 ++#define CONFIG_ANOISESRC_FILTER 0 ++#define CONFIG_ANULLSRC_FILTER 0 ++#define CONFIG_FLITE_FILTER 0 ++#define CONFIG_HILBERT_FILTER 0 ++#define CONFIG_SINC_FILTER 0 ++#define CONFIG_SINE_FILTER 0 ++#define CONFIG_ANULLSINK_FILTER 0 ++#define CONFIG_ADDROI_FILTER 0 ++#define CONFIG_ALPHAEXTRACT_FILTER 0 ++#define CONFIG_ALPHAMERGE_FILTER 0 ++#define CONFIG_AMPLIFY_FILTER 0 ++#define CONFIG_ASS_FILTER 0 ++#define CONFIG_ATADENOISE_FILTER 0 ++#define CONFIG_AVGBLUR_FILTER 0 ++#define CONFIG_AVGBLUR_OPENCL_FILTER 0 ++#define CONFIG_AVGBLUR_VULKAN_FILTER 0 ++#define CONFIG_BACKGROUNDKEY_FILTER 0 ++#define CONFIG_BBOX_FILTER 0 ++#define CONFIG_BENCH_FILTER 0 ++#define CONFIG_BILATERAL_FILTER 0 ++#define CONFIG_BILATERAL_CUDA_FILTER 0 ++#define CONFIG_BITPLANENOISE_FILTER 0 ++#define CONFIG_BLACKDETECT_FILTER 0 ++#define CONFIG_BLACKFRAME_FILTER 0 ++#define CONFIG_BLEND_FILTER 0 ++#define CONFIG_BLEND_VULKAN_FILTER 0 ++#define CONFIG_BLOCKDETECT_FILTER 0 ++#define CONFIG_BLURDETECT_FILTER 0 ++#define CONFIG_BM3D_FILTER 0 ++#define CONFIG_BOXBLUR_FILTER 0 ++#define CONFIG_BOXBLUR_OPENCL_FILTER 0 ++#define CONFIG_BWDIF_FILTER 0 ++#define CONFIG_BWDIF_CUDA_FILTER 0 ++#define CONFIG_BWDIF_VULKAN_FILTER 0 ++#define CONFIG_CAS_FILTER 0 ++#define CONFIG_CCREPACK_FILTER 0 ++#define CONFIG_CHROMABER_VULKAN_FILTER 0 ++#define CONFIG_CHROMAHOLD_FILTER 0 ++#define CONFIG_CHROMAKEY_FILTER 0 ++#define CONFIG_CHROMAKEY_CUDA_FILTER 0 ++#define CONFIG_CHROMANR_FILTER 0 ++#define CONFIG_CHROMASHIFT_FILTER 0 ++#define CONFIG_CIESCOPE_FILTER 0 ++#define CONFIG_CODECVIEW_FILTER 0 ++#define CONFIG_COLORBALANCE_FILTER 0 ++#define CONFIG_COLORCHANNELMIXER_FILTER 0 ++#define 
CONFIG_COLORCONTRAST_FILTER 0 ++#define CONFIG_COLORCORRECT_FILTER 0 ++#define CONFIG_COLORIZE_FILTER 0 ++#define CONFIG_COLORKEY_FILTER 0 ++#define CONFIG_COLORKEY_OPENCL_FILTER 0 ++#define CONFIG_COLORHOLD_FILTER 0 ++#define CONFIG_COLORLEVELS_FILTER 0 ++#define CONFIG_COLORMAP_FILTER 0 ++#define CONFIG_COLORMATRIX_FILTER 0 ++#define CONFIG_COLORSPACE_FILTER 0 ++#define CONFIG_COLORSPACE_CUDA_FILTER 0 ++#define CONFIG_COLORTEMPERATURE_FILTER 0 ++#define CONFIG_CONVOLUTION_FILTER 0 ++#define CONFIG_CONVOLUTION_OPENCL_FILTER 0 ++#define CONFIG_CONVOLVE_FILTER 0 ++#define CONFIG_COPY_FILTER 0 ++#define CONFIG_COREIMAGE_FILTER 0 ++#define CONFIG_CORR_FILTER 0 ++#define CONFIG_COVER_RECT_FILTER 0 ++#define CONFIG_CROP_FILTER 0 ++#define CONFIG_CROPDETECT_FILTER 0 ++#define CONFIG_CUE_FILTER 0 ++#define CONFIG_CURVES_FILTER 0 ++#define CONFIG_DATASCOPE_FILTER 0 ++#define CONFIG_DBLUR_FILTER 0 ++#define CONFIG_DCTDNOIZ_FILTER 0 ++#define CONFIG_DEBAND_FILTER 0 ++#define CONFIG_DEBLOCK_FILTER 0 ++#define CONFIG_DECIMATE_FILTER 0 ++#define CONFIG_DECONVOLVE_FILTER 0 ++#define CONFIG_DEDOT_FILTER 0 ++#define CONFIG_DEFLATE_FILTER 0 ++#define CONFIG_DEFLICKER_FILTER 0 ++#define CONFIG_DEINTERLACE_QSV_FILTER 0 ++#define CONFIG_DEINTERLACE_VAAPI_FILTER 0 ++#define CONFIG_DEJUDDER_FILTER 0 ++#define CONFIG_DELOGO_FILTER 0 ++#define CONFIG_DENOISE_VAAPI_FILTER 0 ++#define CONFIG_DERAIN_FILTER 0 ++#define CONFIG_DESHAKE_FILTER 0 ++#define CONFIG_DESHAKE_OPENCL_FILTER 0 ++#define CONFIG_DESPILL_FILTER 0 ++#define CONFIG_DETELECINE_FILTER 0 ++#define CONFIG_DILATION_FILTER 0 ++#define CONFIG_DILATION_OPENCL_FILTER 0 ++#define CONFIG_DISPLACE_FILTER 0 ++#define CONFIG_DNN_CLASSIFY_FILTER 0 ++#define CONFIG_DNN_DETECT_FILTER 0 ++#define CONFIG_DNN_PROCESSING_FILTER 0 ++#define CONFIG_DOUBLEWEAVE_FILTER 0 ++#define CONFIG_DRAWBOX_FILTER 0 ++#define CONFIG_DRAWGRAPH_FILTER 0 ++#define CONFIG_DRAWGRID_FILTER 0 ++#define CONFIG_DRAWTEXT_FILTER 0 ++#define CONFIG_EDGEDETECT_FILTER 0 
++#define CONFIG_ELBG_FILTER 0 ++#define CONFIG_ENTROPY_FILTER 0 ++#define CONFIG_EPX_FILTER 0 ++#define CONFIG_EQ_FILTER 0 ++#define CONFIG_EROSION_FILTER 0 ++#define CONFIG_EROSION_OPENCL_FILTER 0 ++#define CONFIG_ESTDIF_FILTER 0 ++#define CONFIG_EXPOSURE_FILTER 0 ++#define CONFIG_EXTRACTPLANES_FILTER 0 ++#define CONFIG_FADE_FILTER 0 ++#define CONFIG_FEEDBACK_FILTER 0 ++#define CONFIG_FFTDNOIZ_FILTER 0 ++#define CONFIG_FFTFILT_FILTER 0 ++#define CONFIG_FIELD_FILTER 0 ++#define CONFIG_FIELDHINT_FILTER 0 ++#define CONFIG_FIELDMATCH_FILTER 0 ++#define CONFIG_FIELDORDER_FILTER 0 ++#define CONFIG_FILLBORDERS_FILTER 0 ++#define CONFIG_FIND_RECT_FILTER 0 ++#define CONFIG_FLIP_VULKAN_FILTER 0 ++#define CONFIG_FLOODFILL_FILTER 0 ++#define CONFIG_FORMAT_FILTER 0 ++#define CONFIG_FPS_FILTER 0 ++#define CONFIG_FRAMEPACK_FILTER 0 ++#define CONFIG_FRAMERATE_FILTER 0 ++#define CONFIG_FRAMESTEP_FILTER 0 ++#define CONFIG_FREEZEDETECT_FILTER 0 ++#define CONFIG_FREEZEFRAMES_FILTER 0 ++#define CONFIG_FREI0R_FILTER 0 ++#define CONFIG_FSPP_FILTER 0 ++#define CONFIG_GBLUR_FILTER 0 ++#define CONFIG_GBLUR_VULKAN_FILTER 0 ++#define CONFIG_GEQ_FILTER 0 ++#define CONFIG_GRADFUN_FILTER 0 ++#define CONFIG_GRAPHMONITOR_FILTER 0 ++#define CONFIG_GRAYWORLD_FILTER 0 ++#define CONFIG_GREYEDGE_FILTER 0 ++#define CONFIG_GUIDED_FILTER 0 ++#define CONFIG_HALDCLUT_FILTER 0 ++#define CONFIG_HFLIP_FILTER 0 ++#define CONFIG_HFLIP_VULKAN_FILTER 0 ++#define CONFIG_HISTEQ_FILTER 0 ++#define CONFIG_HISTOGRAM_FILTER 0 ++#define CONFIG_HQDN3D_FILTER 0 ++#define CONFIG_HQX_FILTER 0 ++#define CONFIG_HSTACK_FILTER 0 ++#define CONFIG_HSVHOLD_FILTER 0 ++#define CONFIG_HSVKEY_FILTER 0 ++#define CONFIG_HUE_FILTER 0 ++#define CONFIG_HUESATURATION_FILTER 0 ++#define CONFIG_HWDOWNLOAD_FILTER 0 ++#define CONFIG_HWMAP_FILTER 0 ++#define CONFIG_HWUPLOAD_FILTER 0 ++#define CONFIG_HWUPLOAD_CUDA_FILTER 0 ++#define CONFIG_HYSTERESIS_FILTER 0 ++#define CONFIG_ICCDETECT_FILTER 0 ++#define CONFIG_ICCGEN_FILTER 0 ++#define 
CONFIG_IDENTITY_FILTER 0 ++#define CONFIG_IDET_FILTER 0 ++#define CONFIG_IL_FILTER 0 ++#define CONFIG_INFLATE_FILTER 0 ++#define CONFIG_INTERLACE_FILTER 0 ++#define CONFIG_INTERLEAVE_FILTER 0 ++#define CONFIG_KERNDEINT_FILTER 0 ++#define CONFIG_KIRSCH_FILTER 0 ++#define CONFIG_LAGFUN_FILTER 0 ++#define CONFIG_LATENCY_FILTER 0 ++#define CONFIG_LENSCORRECTION_FILTER 0 ++#define CONFIG_LENSFUN_FILTER 0 ++#define CONFIG_LIBPLACEBO_FILTER 0 ++#define CONFIG_LIBVMAF_FILTER 0 ++#define CONFIG_LIBVMAF_CUDA_FILTER 0 ++#define CONFIG_LIMITDIFF_FILTER 0 ++#define CONFIG_LIMITER_FILTER 0 ++#define CONFIG_LOOP_FILTER 0 ++#define CONFIG_LUMAKEY_FILTER 0 ++#define CONFIG_LUT_FILTER 0 ++#define CONFIG_LUT1D_FILTER 0 ++#define CONFIG_LUT2_FILTER 0 ++#define CONFIG_LUT3D_FILTER 0 ++#define CONFIG_LUTRGB_FILTER 0 ++#define CONFIG_LUTYUV_FILTER 0 ++#define CONFIG_MASKEDCLAMP_FILTER 0 ++#define CONFIG_MASKEDMAX_FILTER 0 ++#define CONFIG_MASKEDMERGE_FILTER 0 ++#define CONFIG_MASKEDMIN_FILTER 0 ++#define CONFIG_MASKEDTHRESHOLD_FILTER 0 ++#define CONFIG_MASKFUN_FILTER 0 ++#define CONFIG_MCDEINT_FILTER 0 ++#define CONFIG_MEDIAN_FILTER 0 ++#define CONFIG_MERGEPLANES_FILTER 0 ++#define CONFIG_MESTIMATE_FILTER 0 ++#define CONFIG_METADATA_FILTER 0 ++#define CONFIG_MIDEQUALIZER_FILTER 0 ++#define CONFIG_MINTERPOLATE_FILTER 0 ++#define CONFIG_MIX_FILTER 0 ++#define CONFIG_MONOCHROME_FILTER 0 ++#define CONFIG_MORPHO_FILTER 0 ++#define CONFIG_MPDECIMATE_FILTER 0 ++#define CONFIG_MSAD_FILTER 0 ++#define CONFIG_MULTIPLY_FILTER 0 ++#define CONFIG_NEGATE_FILTER 0 ++#define CONFIG_NLMEANS_FILTER 0 ++#define CONFIG_NLMEANS_OPENCL_FILTER 0 ++#define CONFIG_NLMEANS_VULKAN_FILTER 0 ++#define CONFIG_NNEDI_FILTER 0 ++#define CONFIG_NOFORMAT_FILTER 0 ++#define CONFIG_NOISE_FILTER 0 ++#define CONFIG_NORMALIZE_FILTER 0 ++#define CONFIG_NULL_FILTER 0 ++#define CONFIG_OCR_FILTER 0 ++#define CONFIG_OCV_FILTER 0 ++#define CONFIG_OSCILLOSCOPE_FILTER 0 ++#define CONFIG_OVERLAY_FILTER 0 ++#define 
CONFIG_OVERLAY_OPENCL_FILTER 0 ++#define CONFIG_OVERLAY_QSV_FILTER 0 ++#define CONFIG_OVERLAY_VAAPI_FILTER 0 ++#define CONFIG_OVERLAY_VULKAN_FILTER 0 ++#define CONFIG_OVERLAY_CUDA_FILTER 0 ++#define CONFIG_OWDENOISE_FILTER 0 ++#define CONFIG_PAD_FILTER 0 ++#define CONFIG_PAD_OPENCL_FILTER 0 ++#define CONFIG_PALETTEGEN_FILTER 0 ++#define CONFIG_PALETTEUSE_FILTER 0 ++#define CONFIG_PERMS_FILTER 0 ++#define CONFIG_PERSPECTIVE_FILTER 0 ++#define CONFIG_PHASE_FILTER 0 ++#define CONFIG_PHOTOSENSITIVITY_FILTER 0 ++#define CONFIG_PIXDESCTEST_FILTER 0 ++#define CONFIG_PIXELIZE_FILTER 0 ++#define CONFIG_PIXSCOPE_FILTER 0 ++#define CONFIG_PP_FILTER 0 ++#define CONFIG_PP7_FILTER 0 ++#define CONFIG_PREMULTIPLY_FILTER 0 ++#define CONFIG_PREWITT_FILTER 0 ++#define CONFIG_PREWITT_OPENCL_FILTER 0 ++#define CONFIG_PROCAMP_VAAPI_FILTER 0 ++#define CONFIG_PROGRAM_OPENCL_FILTER 0 ++#define CONFIG_PSEUDOCOLOR_FILTER 0 ++#define CONFIG_PSNR_FILTER 0 ++#define CONFIG_PULLUP_FILTER 0 ++#define CONFIG_QP_FILTER 0 ++#define CONFIG_RANDOM_FILTER 0 ++#define CONFIG_READEIA608_FILTER 0 ++#define CONFIG_READVITC_FILTER 0 ++#define CONFIG_REALTIME_FILTER 0 ++#define CONFIG_REMAP_FILTER 0 ++#define CONFIG_REMAP_OPENCL_FILTER 0 ++#define CONFIG_REMOVEGRAIN_FILTER 0 ++#define CONFIG_REMOVELOGO_FILTER 0 ++#define CONFIG_REPEATFIELDS_FILTER 0 ++#define CONFIG_REVERSE_FILTER 0 ++#define CONFIG_RGBASHIFT_FILTER 0 ++#define CONFIG_ROBERTS_FILTER 0 ++#define CONFIG_ROBERTS_OPENCL_FILTER 0 ++#define CONFIG_ROTATE_FILTER 0 ++#define CONFIG_SAB_FILTER 0 ++#define CONFIG_SCALE_FILTER 0 ++#define CONFIG_SCALE_CUDA_FILTER 0 ++#define CONFIG_SCALE_NPP_FILTER 0 ++#define CONFIG_SCALE_QSV_FILTER 0 ++#define CONFIG_SCALE_VAAPI_FILTER 0 ++#define CONFIG_SCALE_VT_FILTER 0 ++#define CONFIG_SCALE_VULKAN_FILTER 0 ++#define CONFIG_SCALE2REF_FILTER 0 ++#define CONFIG_SCALE2REF_NPP_FILTER 0 ++#define CONFIG_SCDET_FILTER 0 ++#define CONFIG_SCHARR_FILTER 0 ++#define CONFIG_SCROLL_FILTER 0 ++#define CONFIG_SEGMENT_FILTER 0 
++#define CONFIG_SELECT_FILTER 0 ++#define CONFIG_SELECTIVECOLOR_FILTER 0 ++#define CONFIG_SENDCMD_FILTER 0 ++#define CONFIG_SEPARATEFIELDS_FILTER 0 ++#define CONFIG_SETDAR_FILTER 0 ++#define CONFIG_SETFIELD_FILTER 0 ++#define CONFIG_SETPARAMS_FILTER 0 ++#define CONFIG_SETPTS_FILTER 0 ++#define CONFIG_SETRANGE_FILTER 0 ++#define CONFIG_SETSAR_FILTER 0 ++#define CONFIG_SETTB_FILTER 0 ++#define CONFIG_SHARPEN_NPP_FILTER 0 ++#define CONFIG_SHARPNESS_VAAPI_FILTER 0 ++#define CONFIG_SHEAR_FILTER 0 ++#define CONFIG_SHOWINFO_FILTER 0 ++#define CONFIG_SHOWPALETTE_FILTER 0 ++#define CONFIG_SHUFFLEFRAMES_FILTER 0 ++#define CONFIG_SHUFFLEPIXELS_FILTER 0 ++#define CONFIG_SHUFFLEPLANES_FILTER 0 ++#define CONFIG_SIDEDATA_FILTER 0 ++#define CONFIG_SIGNALSTATS_FILTER 0 ++#define CONFIG_SIGNATURE_FILTER 0 ++#define CONFIG_SITI_FILTER 0 ++#define CONFIG_SMARTBLUR_FILTER 0 ++#define CONFIG_SOBEL_FILTER 0 ++#define CONFIG_SOBEL_OPENCL_FILTER 0 ++#define CONFIG_SPLIT_FILTER 0 ++#define CONFIG_SPP_FILTER 0 ++#define CONFIG_SR_FILTER 0 ++#define CONFIG_SSIM_FILTER 0 ++#define CONFIG_SSIM360_FILTER 0 ++#define CONFIG_STEREO3D_FILTER 0 ++#define CONFIG_STREAMSELECT_FILTER 0 ++#define CONFIG_SUBTITLES_FILTER 0 ++#define CONFIG_SUPER2XSAI_FILTER 0 ++#define CONFIG_SWAPRECT_FILTER 0 ++#define CONFIG_SWAPUV_FILTER 0 ++#define CONFIG_TBLEND_FILTER 0 ++#define CONFIG_TELECINE_FILTER 0 ++#define CONFIG_THISTOGRAM_FILTER 0 ++#define CONFIG_THRESHOLD_FILTER 0 ++#define CONFIG_THUMBNAIL_FILTER 0 ++#define CONFIG_THUMBNAIL_CUDA_FILTER 0 ++#define CONFIG_TILE_FILTER 0 ++#define CONFIG_TINTERLACE_FILTER 0 ++#define CONFIG_TLUT2_FILTER 0 ++#define CONFIG_TMEDIAN_FILTER 0 ++#define CONFIG_TMIDEQUALIZER_FILTER 0 ++#define CONFIG_TMIX_FILTER 0 ++#define CONFIG_TONEMAP_FILTER 0 ++#define CONFIG_TONEMAP_OPENCL_FILTER 0 ++#define CONFIG_TONEMAP_VAAPI_FILTER 0 ++#define CONFIG_TPAD_FILTER 0 ++#define CONFIG_TRANSPOSE_FILTER 0 ++#define CONFIG_TRANSPOSE_NPP_FILTER 0 ++#define CONFIG_TRANSPOSE_OPENCL_FILTER 0 
++#define CONFIG_TRANSPOSE_VAAPI_FILTER 0 ++#define CONFIG_TRANSPOSE_VT_FILTER 0 ++#define CONFIG_TRANSPOSE_VULKAN_FILTER 0 ++#define CONFIG_TRIM_FILTER 0 ++#define CONFIG_UNPREMULTIPLY_FILTER 0 ++#define CONFIG_UNSHARP_FILTER 0 ++#define CONFIG_UNSHARP_OPENCL_FILTER 0 ++#define CONFIG_UNTILE_FILTER 0 ++#define CONFIG_USPP_FILTER 0 ++#define CONFIG_V360_FILTER 0 ++#define CONFIG_VAGUEDENOISER_FILTER 0 ++#define CONFIG_VARBLUR_FILTER 0 ++#define CONFIG_VECTORSCOPE_FILTER 0 ++#define CONFIG_VFLIP_FILTER 0 ++#define CONFIG_VFLIP_VULKAN_FILTER 0 ++#define CONFIG_VFRDET_FILTER 0 ++#define CONFIG_VIBRANCE_FILTER 0 ++#define CONFIG_VIDSTABDETECT_FILTER 0 ++#define CONFIG_VIDSTABTRANSFORM_FILTER 0 ++#define CONFIG_VIF_FILTER 0 ++#define CONFIG_VIGNETTE_FILTER 0 ++#define CONFIG_VMAFMOTION_FILTER 0 ++#define CONFIG_VPP_QSV_FILTER 0 ++#define CONFIG_VSTACK_FILTER 0 ++#define CONFIG_W3FDIF_FILTER 0 ++#define CONFIG_WAVEFORM_FILTER 0 ++#define CONFIG_WEAVE_FILTER 0 ++#define CONFIG_XBR_FILTER 0 ++#define CONFIG_XCORRELATE_FILTER 0 ++#define CONFIG_XFADE_FILTER 0 ++#define CONFIG_XFADE_OPENCL_FILTER 0 ++#define CONFIG_XFADE_VULKAN_FILTER 0 ++#define CONFIG_XMEDIAN_FILTER 0 ++#define CONFIG_XSTACK_FILTER 0 ++#define CONFIG_YADIF_FILTER 0 ++#define CONFIG_YADIF_CUDA_FILTER 0 ++#define CONFIG_YADIF_VIDEOTOOLBOX_FILTER 0 ++#define CONFIG_YAEPBLUR_FILTER 0 ++#define CONFIG_ZMQ_FILTER 0 ++#define CONFIG_ZOOMPAN_FILTER 0 ++#define CONFIG_ZSCALE_FILTER 0 ++#define CONFIG_HSTACK_VAAPI_FILTER 0 ++#define CONFIG_VSTACK_VAAPI_FILTER 0 ++#define CONFIG_XSTACK_VAAPI_FILTER 0 ++#define CONFIG_HSTACK_QSV_FILTER 0 ++#define CONFIG_VSTACK_QSV_FILTER 0 ++#define CONFIG_XSTACK_QSV_FILTER 0 ++#define CONFIG_ALLRGB_FILTER 0 ++#define CONFIG_ALLYUV_FILTER 0 ++#define CONFIG_CELLAUTO_FILTER 0 ++#define CONFIG_COLOR_FILTER 0 ++#define CONFIG_COLOR_VULKAN_FILTER 0 ++#define CONFIG_COLORCHART_FILTER 0 ++#define CONFIG_COLORSPECTRUM_FILTER 0 ++#define CONFIG_COREIMAGESRC_FILTER 0 ++#define 
CONFIG_DDAGRAB_FILTER 0 ++#define CONFIG_FREI0R_SRC_FILTER 0 ++#define CONFIG_GRADIENTS_FILTER 0 ++#define CONFIG_HALDCLUTSRC_FILTER 0 ++#define CONFIG_LIFE_FILTER 0 ++#define CONFIG_MANDELBROT_FILTER 0 ++#define CONFIG_MPTESTSRC_FILTER 0 ++#define CONFIG_NULLSRC_FILTER 0 ++#define CONFIG_OPENCLSRC_FILTER 0 ++#define CONFIG_PAL75BARS_FILTER 0 ++#define CONFIG_PAL100BARS_FILTER 0 ++#define CONFIG_RGBTESTSRC_FILTER 0 ++#define CONFIG_SIERPINSKI_FILTER 0 ++#define CONFIG_SMPTEBARS_FILTER 0 ++#define CONFIG_SMPTEHDBARS_FILTER 0 ++#define CONFIG_TESTSRC_FILTER 0 ++#define CONFIG_TESTSRC2_FILTER 0 ++#define CONFIG_YUVTESTSRC_FILTER 0 ++#define CONFIG_ZONEPLATE_FILTER 0 ++#define CONFIG_NULLSINK_FILTER 0 ++#define CONFIG_A3DSCOPE_FILTER 0 ++#define CONFIG_ABITSCOPE_FILTER 0 ++#define CONFIG_ADRAWGRAPH_FILTER 0 ++#define CONFIG_AGRAPHMONITOR_FILTER 0 ++#define CONFIG_AHISTOGRAM_FILTER 0 ++#define CONFIG_APHASEMETER_FILTER 0 ++#define CONFIG_AVECTORSCOPE_FILTER 0 ++#define CONFIG_CONCAT_FILTER 0 ++#define CONFIG_SHOWCQT_FILTER 0 ++#define CONFIG_SHOWCWT_FILTER 0 ++#define CONFIG_SHOWFREQS_FILTER 0 ++#define CONFIG_SHOWSPATIAL_FILTER 0 ++#define CONFIG_SHOWSPECTRUM_FILTER 0 ++#define CONFIG_SHOWSPECTRUMPIC_FILTER 0 ++#define CONFIG_SHOWVOLUME_FILTER 0 ++#define CONFIG_SHOWWAVES_FILTER 0 ++#define CONFIG_SHOWWAVESPIC_FILTER 0 ++#define CONFIG_SPECTRUMSYNTH_FILTER 0 ++#define CONFIG_AVSYNCTEST_FILTER 0 ++#define CONFIG_AMOVIE_FILTER 0 ++#define CONFIG_MOVIE_FILTER 0 ++#define CONFIG_AFIFO_FILTER 0 ++#define CONFIG_FIFO_FILTER 0 ++#define CONFIG_AA_DEMUXER 0 ++#define CONFIG_AAC_DEMUXER 0 ++#define CONFIG_AAX_DEMUXER 0 ++#define CONFIG_AC3_DEMUXER 0 ++#define CONFIG_AC4_DEMUXER 0 ++#define CONFIG_ACE_DEMUXER 0 ++#define CONFIG_ACM_DEMUXER 0 ++#define CONFIG_ACT_DEMUXER 0 ++#define CONFIG_ADF_DEMUXER 0 ++#define CONFIG_ADP_DEMUXER 0 ++#define CONFIG_ADS_DEMUXER 0 ++#define CONFIG_ADX_DEMUXER 0 ++#define CONFIG_AEA_DEMUXER 0 ++#define CONFIG_AFC_DEMUXER 0 ++#define 
CONFIG_AIFF_DEMUXER 0 ++#define CONFIG_AIX_DEMUXER 0 ++#define CONFIG_ALP_DEMUXER 0 ++#define CONFIG_AMR_DEMUXER 0 ++#define CONFIG_AMRNB_DEMUXER 0 ++#define CONFIG_AMRWB_DEMUXER 0 ++#define CONFIG_ANM_DEMUXER 0 ++#define CONFIG_APAC_DEMUXER 0 ++#define CONFIG_APC_DEMUXER 0 ++#define CONFIG_APE_DEMUXER 0 ++#define CONFIG_APM_DEMUXER 0 ++#define CONFIG_APNG_DEMUXER 0 ++#define CONFIG_APTX_DEMUXER 0 ++#define CONFIG_APTX_HD_DEMUXER 0 ++#define CONFIG_AQTITLE_DEMUXER 0 ++#define CONFIG_ARGO_ASF_DEMUXER 0 ++#define CONFIG_ARGO_BRP_DEMUXER 0 ++#define CONFIG_ARGO_CVG_DEMUXER 0 ++#define CONFIG_ASF_DEMUXER 0 ++#define CONFIG_ASF_O_DEMUXER 0 ++#define CONFIG_ASS_DEMUXER 0 ++#define CONFIG_AST_DEMUXER 0 ++#define CONFIG_AU_DEMUXER 0 ++#define CONFIG_AV1_DEMUXER 0 ++#define CONFIG_AVI_DEMUXER 0 ++#define CONFIG_AVISYNTH_DEMUXER 0 ++#define CONFIG_AVR_DEMUXER 0 ++#define CONFIG_AVS_DEMUXER 0 ++#define CONFIG_AVS2_DEMUXER 0 ++#define CONFIG_AVS3_DEMUXER 0 ++#define CONFIG_BETHSOFTVID_DEMUXER 0 ++#define CONFIG_BFI_DEMUXER 0 ++#define CONFIG_BINTEXT_DEMUXER 0 ++#define CONFIG_BINK_DEMUXER 0 ++#define CONFIG_BINKA_DEMUXER 0 ++#define CONFIG_BIT_DEMUXER 0 ++#define CONFIG_BITPACKED_DEMUXER 0 ++#define CONFIG_BMV_DEMUXER 0 ++#define CONFIG_BFSTM_DEMUXER 0 ++#define CONFIG_BRSTM_DEMUXER 0 ++#define CONFIG_BOA_DEMUXER 0 ++#define CONFIG_BONK_DEMUXER 0 ++#define CONFIG_C93_DEMUXER 0 ++#define CONFIG_CAF_DEMUXER 0 ++#define CONFIG_CAVSVIDEO_DEMUXER 0 ++#define CONFIG_CDG_DEMUXER 0 ++#define CONFIG_CDXL_DEMUXER 0 ++#define CONFIG_CINE_DEMUXER 0 ++#define CONFIG_CODEC2_DEMUXER 0 ++#define CONFIG_CODEC2RAW_DEMUXER 0 ++#define CONFIG_CONCAT_DEMUXER 0 ++#define CONFIG_DASH_DEMUXER 0 ++#define CONFIG_DATA_DEMUXER 0 ++#define CONFIG_DAUD_DEMUXER 0 ++#define CONFIG_DCSTR_DEMUXER 0 ++#define CONFIG_DERF_DEMUXER 0 ++#define CONFIG_DFA_DEMUXER 0 ++#define CONFIG_DFPWM_DEMUXER 0 ++#define CONFIG_DHAV_DEMUXER 0 ++#define CONFIG_DIRAC_DEMUXER 0 ++#define CONFIG_DNXHD_DEMUXER 0 ++#define 
CONFIG_DSF_DEMUXER 0 ++#define CONFIG_DSICIN_DEMUXER 0 ++#define CONFIG_DSS_DEMUXER 0 ++#define CONFIG_DTS_DEMUXER 0 ++#define CONFIG_DTSHD_DEMUXER 0 ++#define CONFIG_DV_DEMUXER 0 ++#define CONFIG_DVBSUB_DEMUXER 0 ++#define CONFIG_DVBTXT_DEMUXER 0 ++#define CONFIG_DXA_DEMUXER 0 ++#define CONFIG_EA_DEMUXER 0 ++#define CONFIG_EA_CDATA_DEMUXER 0 ++#define CONFIG_EAC3_DEMUXER 0 ++#define CONFIG_EPAF_DEMUXER 0 ++#define CONFIG_EVC_DEMUXER 0 ++#define CONFIG_FFMETADATA_DEMUXER 0 ++#define CONFIG_FILMSTRIP_DEMUXER 0 ++#define CONFIG_FITS_DEMUXER 0 ++#define CONFIG_FLAC_DEMUXER 1 ++#define CONFIG_FLIC_DEMUXER 0 ++#define CONFIG_FLV_DEMUXER 0 ++#define CONFIG_LIVE_FLV_DEMUXER 0 ++#define CONFIG_FOURXM_DEMUXER 0 ++#define CONFIG_FRM_DEMUXER 0 ++#define CONFIG_FSB_DEMUXER 0 ++#define CONFIG_FWSE_DEMUXER 0 ++#define CONFIG_G722_DEMUXER 0 ++#define CONFIG_G723_1_DEMUXER 0 ++#define CONFIG_G726_DEMUXER 0 ++#define CONFIG_G726LE_DEMUXER 0 ++#define CONFIG_G729_DEMUXER 0 ++#define CONFIG_GDV_DEMUXER 0 ++#define CONFIG_GENH_DEMUXER 0 ++#define CONFIG_GIF_DEMUXER 0 ++#define CONFIG_GSM_DEMUXER 0 ++#define CONFIG_GXF_DEMUXER 0 ++#define CONFIG_H261_DEMUXER 0 ++#define CONFIG_H263_DEMUXER 0 ++#define CONFIG_H264_DEMUXER 0 ++#define CONFIG_HCA_DEMUXER 0 ++#define CONFIG_HCOM_DEMUXER 0 ++#define CONFIG_HEVC_DEMUXER 0 ++#define CONFIG_HLS_DEMUXER 0 ++#define CONFIG_HNM_DEMUXER 0 ++#define CONFIG_ICO_DEMUXER 0 ++#define CONFIG_IDCIN_DEMUXER 0 ++#define CONFIG_IDF_DEMUXER 0 ++#define CONFIG_IFF_DEMUXER 0 ++#define CONFIG_IFV_DEMUXER 0 ++#define CONFIG_ILBC_DEMUXER 0 ++#define CONFIG_IMAGE2_DEMUXER 0 ++#define CONFIG_IMAGE2PIPE_DEMUXER 0 ++#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 0 ++#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0 ++#define CONFIG_IMF_DEMUXER 0 ++#define CONFIG_INGENIENT_DEMUXER 0 ++#define CONFIG_IPMOVIE_DEMUXER 0 ++#define CONFIG_IPU_DEMUXER 0 ++#define CONFIG_IRCAM_DEMUXER 0 ++#define CONFIG_ISS_DEMUXER 0 ++#define CONFIG_IV8_DEMUXER 0 ++#define CONFIG_IVF_DEMUXER 0 ++#define 
CONFIG_IVR_DEMUXER 0 ++#define CONFIG_JACOSUB_DEMUXER 0 ++#define CONFIG_JV_DEMUXER 0 ++#define CONFIG_JPEGXL_ANIM_DEMUXER 0 ++#define CONFIG_KUX_DEMUXER 0 ++#define CONFIG_KVAG_DEMUXER 0 ++#define CONFIG_LAF_DEMUXER 0 ++#define CONFIG_LMLM4_DEMUXER 0 ++#define CONFIG_LOAS_DEMUXER 0 ++#define CONFIG_LUODAT_DEMUXER 0 ++#define CONFIG_LRC_DEMUXER 0 ++#define CONFIG_LVF_DEMUXER 0 ++#define CONFIG_LXF_DEMUXER 0 ++#define CONFIG_M4V_DEMUXER 0 ++#define CONFIG_MCA_DEMUXER 0 ++#define CONFIG_MCC_DEMUXER 0 ++#define CONFIG_MATROSKA_DEMUXER 1 ++#define CONFIG_MGSTS_DEMUXER 0 ++#define CONFIG_MICRODVD_DEMUXER 0 ++#define CONFIG_MJPEG_DEMUXER 0 ++#define CONFIG_MJPEG_2000_DEMUXER 0 ++#define CONFIG_MLP_DEMUXER 0 ++#define CONFIG_MLV_DEMUXER 0 ++#define CONFIG_MM_DEMUXER 0 ++#define CONFIG_MMF_DEMUXER 0 ++#define CONFIG_MODS_DEMUXER 0 ++#define CONFIG_MOFLEX_DEMUXER 0 ++#define CONFIG_MOV_DEMUXER 1 ++#define CONFIG_MP3_DEMUXER 1 ++#define CONFIG_MPC_DEMUXER 0 ++#define CONFIG_MPC8_DEMUXER 0 ++#define CONFIG_MPEGPS_DEMUXER 0 ++#define CONFIG_MPEGTS_DEMUXER 0 ++#define CONFIG_MPEGTSRAW_DEMUXER 0 ++#define CONFIG_MPEGVIDEO_DEMUXER 0 ++#define CONFIG_MPJPEG_DEMUXER 0 ++#define CONFIG_MPL2_DEMUXER 0 ++#define CONFIG_MPSUB_DEMUXER 0 ++#define CONFIG_MSF_DEMUXER 0 ++#define CONFIG_MSNWC_TCP_DEMUXER 0 ++#define CONFIG_MSP_DEMUXER 0 ++#define CONFIG_MTAF_DEMUXER 0 ++#define CONFIG_MTV_DEMUXER 0 ++#define CONFIG_MUSX_DEMUXER 0 ++#define CONFIG_MV_DEMUXER 0 ++#define CONFIG_MVI_DEMUXER 0 ++#define CONFIG_MXF_DEMUXER 0 ++#define CONFIG_MXG_DEMUXER 0 ++#define CONFIG_NC_DEMUXER 0 ++#define CONFIG_NISTSPHERE_DEMUXER 0 ++#define CONFIG_NSP_DEMUXER 0 ++#define CONFIG_NSV_DEMUXER 0 ++#define CONFIG_NUT_DEMUXER 0 ++#define CONFIG_NUV_DEMUXER 0 ++#define CONFIG_OBU_DEMUXER 0 ++#define CONFIG_OGG_DEMUXER 1 ++#define CONFIG_OMA_DEMUXER 0 ++#define CONFIG_OSQ_DEMUXER 0 ++#define CONFIG_PAF_DEMUXER 0 ++#define CONFIG_PCM_ALAW_DEMUXER 0 ++#define CONFIG_PCM_MULAW_DEMUXER 0 ++#define 
CONFIG_PCM_VIDC_DEMUXER 0 ++#define CONFIG_PCM_F64BE_DEMUXER 0 ++#define CONFIG_PCM_F64LE_DEMUXER 0 ++#define CONFIG_PCM_F32BE_DEMUXER 0 ++#define CONFIG_PCM_F32LE_DEMUXER 0 ++#define CONFIG_PCM_S32BE_DEMUXER 0 ++#define CONFIG_PCM_S32LE_DEMUXER 0 ++#define CONFIG_PCM_S24BE_DEMUXER 0 ++#define CONFIG_PCM_S24LE_DEMUXER 0 ++#define CONFIG_PCM_S16BE_DEMUXER 0 ++#define CONFIG_PCM_S16LE_DEMUXER 0 ++#define CONFIG_PCM_S8_DEMUXER 0 ++#define CONFIG_PCM_U32BE_DEMUXER 0 ++#define CONFIG_PCM_U32LE_DEMUXER 0 ++#define CONFIG_PCM_U24BE_DEMUXER 0 ++#define CONFIG_PCM_U24LE_DEMUXER 0 ++#define CONFIG_PCM_U16BE_DEMUXER 0 ++#define CONFIG_PCM_U16LE_DEMUXER 0 ++#define CONFIG_PCM_U8_DEMUXER 0 ++#define CONFIG_PDV_DEMUXER 0 ++#define CONFIG_PJS_DEMUXER 0 ++#define CONFIG_PMP_DEMUXER 0 ++#define CONFIG_PP_BNK_DEMUXER 0 ++#define CONFIG_PVA_DEMUXER 0 ++#define CONFIG_PVF_DEMUXER 0 ++#define CONFIG_QCP_DEMUXER 0 ++#define CONFIG_R3D_DEMUXER 0 ++#define CONFIG_RAWVIDEO_DEMUXER 0 ++#define CONFIG_REALTEXT_DEMUXER 0 ++#define CONFIG_REDSPARK_DEMUXER 0 ++#define CONFIG_RKA_DEMUXER 0 ++#define CONFIG_RL2_DEMUXER 0 ++#define CONFIG_RM_DEMUXER 0 ++#define CONFIG_ROQ_DEMUXER 0 ++#define CONFIG_RPL_DEMUXER 0 ++#define CONFIG_RSD_DEMUXER 0 ++#define CONFIG_RSO_DEMUXER 0 ++#define CONFIG_RTP_DEMUXER 0 ++#define CONFIG_RTSP_DEMUXER 0 ++#define CONFIG_S337M_DEMUXER 0 ++#define CONFIG_SAMI_DEMUXER 0 ++#define CONFIG_SAP_DEMUXER 0 ++#define CONFIG_SBC_DEMUXER 0 ++#define CONFIG_SBG_DEMUXER 0 ++#define CONFIG_SCC_DEMUXER 0 ++#define CONFIG_SCD_DEMUXER 0 ++#define CONFIG_SDNS_DEMUXER 0 ++#define CONFIG_SDP_DEMUXER 0 ++#define CONFIG_SDR2_DEMUXER 0 ++#define CONFIG_SDS_DEMUXER 0 ++#define CONFIG_SDX_DEMUXER 0 ++#define CONFIG_SEGAFILM_DEMUXER 0 ++#define CONFIG_SER_DEMUXER 0 ++#define CONFIG_SGA_DEMUXER 0 ++#define CONFIG_SHORTEN_DEMUXER 0 ++#define CONFIG_SIFF_DEMUXER 0 ++#define CONFIG_SIMBIOSIS_IMX_DEMUXER 0 ++#define CONFIG_SLN_DEMUXER 0 ++#define CONFIG_SMACKER_DEMUXER 0 ++#define 
CONFIG_SMJPEG_DEMUXER 0 ++#define CONFIG_SMUSH_DEMUXER 0 ++#define CONFIG_SOL_DEMUXER 0 ++#define CONFIG_SOX_DEMUXER 0 ++#define CONFIG_SPDIF_DEMUXER 0 ++#define CONFIG_SRT_DEMUXER 0 ++#define CONFIG_STR_DEMUXER 0 ++#define CONFIG_STL_DEMUXER 0 ++#define CONFIG_SUBVIEWER1_DEMUXER 0 ++#define CONFIG_SUBVIEWER_DEMUXER 0 ++#define CONFIG_SUP_DEMUXER 0 ++#define CONFIG_SVAG_DEMUXER 0 ++#define CONFIG_SVS_DEMUXER 0 ++#define CONFIG_SWF_DEMUXER 0 ++#define CONFIG_TAK_DEMUXER 0 ++#define CONFIG_TEDCAPTIONS_DEMUXER 0 ++#define CONFIG_THP_DEMUXER 0 ++#define CONFIG_THREEDOSTR_DEMUXER 0 ++#define CONFIG_TIERTEXSEQ_DEMUXER 0 ++#define CONFIG_TMV_DEMUXER 0 ++#define CONFIG_TRUEHD_DEMUXER 0 ++#define CONFIG_TTA_DEMUXER 0 ++#define CONFIG_TXD_DEMUXER 0 ++#define CONFIG_TTY_DEMUXER 0 ++#define CONFIG_TY_DEMUXER 0 ++#define CONFIG_USM_DEMUXER 0 ++#define CONFIG_V210_DEMUXER 0 ++#define CONFIG_V210X_DEMUXER 0 ++#define CONFIG_VAG_DEMUXER 0 ++#define CONFIG_VC1_DEMUXER 0 ++#define CONFIG_VC1T_DEMUXER 0 ++#define CONFIG_VIVIDAS_DEMUXER 0 ++#define CONFIG_VIVO_DEMUXER 0 ++#define CONFIG_VMD_DEMUXER 0 ++#define CONFIG_VOBSUB_DEMUXER 0 ++#define CONFIG_VOC_DEMUXER 0 ++#define CONFIG_VPK_DEMUXER 0 ++#define CONFIG_VPLAYER_DEMUXER 0 ++#define CONFIG_VQF_DEMUXER 0 ++#define CONFIG_VVC_DEMUXER 0 ++#define CONFIG_W64_DEMUXER 0 ++#define CONFIG_WADY_DEMUXER 0 ++#define CONFIG_WAVARC_DEMUXER 0 ++#define CONFIG_WAV_DEMUXER 1 ++#define CONFIG_WC3_DEMUXER 0 ++#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0 ++#define CONFIG_WEBVTT_DEMUXER 0 ++#define CONFIG_WSAUD_DEMUXER 0 ++#define CONFIG_WSD_DEMUXER 0 ++#define CONFIG_WSVQA_DEMUXER 0 ++#define CONFIG_WTV_DEMUXER 0 ++#define CONFIG_WVE_DEMUXER 0 ++#define CONFIG_WV_DEMUXER 0 ++#define CONFIG_XA_DEMUXER 0 ++#define CONFIG_XBIN_DEMUXER 0 ++#define CONFIG_XMD_DEMUXER 0 ++#define CONFIG_XMV_DEMUXER 0 ++#define CONFIG_XVAG_DEMUXER 0 ++#define CONFIG_XWMA_DEMUXER 0 ++#define CONFIG_YOP_DEMUXER 0 ++#define CONFIG_YUV4MPEGPIPE_DEMUXER 0 ++#define 
CONFIG_IMAGE_BMP_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_CRI_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_DDS_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_GEM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_GIF_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_HDR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_JPEGXL_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PFM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PGX_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PHM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PHOTOCD_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PPM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_PSD_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_QDRAW_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_QOI_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SVG_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_VBN_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XBM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XPM_PIPE_DEMUXER 0 ++#define CONFIG_IMAGE_XWD_PIPE_DEMUXER 0 ++#define CONFIG_LIBGME_DEMUXER 0 ++#define CONFIG_LIBMODPLUG_DEMUXER 0 ++#define CONFIG_LIBOPENMPT_DEMUXER 0 ++#define CONFIG_VAPOURSYNTH_DEMUXER 0 ++#define CONFIG_A64_MUXER 0 ++#define CONFIG_AC3_MUXER 0 ++#define CONFIG_AC4_MUXER 0 ++#define CONFIG_ADTS_MUXER 0 ++#define CONFIG_ADX_MUXER 0 ++#define CONFIG_AIFF_MUXER 0 ++#define CONFIG_ALP_MUXER 0 ++#define CONFIG_AMR_MUXER 0 ++#define CONFIG_AMV_MUXER 0 ++#define CONFIG_APM_MUXER 0 ++#define CONFIG_APNG_MUXER 0 
++#define CONFIG_APTX_MUXER 0 ++#define CONFIG_APTX_HD_MUXER 0 ++#define CONFIG_ARGO_ASF_MUXER 0 ++#define CONFIG_ARGO_CVG_MUXER 0 ++#define CONFIG_ASF_MUXER 0 ++#define CONFIG_ASS_MUXER 0 ++#define CONFIG_AST_MUXER 0 ++#define CONFIG_ASF_STREAM_MUXER 0 ++#define CONFIG_AU_MUXER 0 ++#define CONFIG_AVI_MUXER 0 ++#define CONFIG_AVIF_MUXER 0 ++#define CONFIG_AVM2_MUXER 0 ++#define CONFIG_AVS2_MUXER 0 ++#define CONFIG_AVS3_MUXER 0 ++#define CONFIG_BIT_MUXER 0 ++#define CONFIG_CAF_MUXER 0 ++#define CONFIG_CAVSVIDEO_MUXER 0 ++#define CONFIG_CODEC2_MUXER 0 ++#define CONFIG_CODEC2RAW_MUXER 0 ++#define CONFIG_CRC_MUXER 0 ++#define CONFIG_DASH_MUXER 0 ++#define CONFIG_DATA_MUXER 0 ++#define CONFIG_DAUD_MUXER 0 ++#define CONFIG_DFPWM_MUXER 0 ++#define CONFIG_DIRAC_MUXER 0 ++#define CONFIG_DNXHD_MUXER 0 ++#define CONFIG_DTS_MUXER 0 ++#define CONFIG_DV_MUXER 0 ++#define CONFIG_EAC3_MUXER 0 ++#define CONFIG_EVC_MUXER 0 ++#define CONFIG_F4V_MUXER 0 ++#define CONFIG_FFMETADATA_MUXER 0 ++#define CONFIG_FIFO_MUXER 0 ++#define CONFIG_FIFO_TEST_MUXER 0 ++#define CONFIG_FILMSTRIP_MUXER 0 ++#define CONFIG_FITS_MUXER 0 ++#define CONFIG_FLAC_MUXER 0 ++#define CONFIG_FLV_MUXER 0 ++#define CONFIG_FRAMECRC_MUXER 0 ++#define CONFIG_FRAMEHASH_MUXER 0 ++#define CONFIG_FRAMEMD5_MUXER 0 ++#define CONFIG_G722_MUXER 0 ++#define CONFIG_G723_1_MUXER 0 ++#define CONFIG_G726_MUXER 0 ++#define CONFIG_G726LE_MUXER 0 ++#define CONFIG_GIF_MUXER 0 ++#define CONFIG_GSM_MUXER 0 ++#define CONFIG_GXF_MUXER 0 ++#define CONFIG_H261_MUXER 0 ++#define CONFIG_H263_MUXER 0 ++#define CONFIG_H264_MUXER 0 ++#define CONFIG_HASH_MUXER 0 ++#define CONFIG_HDS_MUXER 0 ++#define CONFIG_HEVC_MUXER 0 ++#define CONFIG_HLS_MUXER 0 ++#define CONFIG_ICO_MUXER 0 ++#define CONFIG_ILBC_MUXER 0 ++#define CONFIG_IMAGE2_MUXER 0 ++#define CONFIG_IMAGE2PIPE_MUXER 0 ++#define CONFIG_IPOD_MUXER 0 ++#define CONFIG_IRCAM_MUXER 0 ++#define CONFIG_ISMV_MUXER 0 ++#define CONFIG_IVF_MUXER 0 ++#define CONFIG_JACOSUB_MUXER 0 ++#define 
CONFIG_KVAG_MUXER 0 ++#define CONFIG_LATM_MUXER 0 ++#define CONFIG_LRC_MUXER 0 ++#define CONFIG_M4V_MUXER 0 ++#define CONFIG_MD5_MUXER 0 ++#define CONFIG_MATROSKA_MUXER 0 ++#define CONFIG_MATROSKA_AUDIO_MUXER 0 ++#define CONFIG_MICRODVD_MUXER 0 ++#define CONFIG_MJPEG_MUXER 0 ++#define CONFIG_MLP_MUXER 0 ++#define CONFIG_MMF_MUXER 0 ++#define CONFIG_MOV_MUXER 0 ++#define CONFIG_MP2_MUXER 0 ++#define CONFIG_MP3_MUXER 0 ++#define CONFIG_MP4_MUXER 0 ++#define CONFIG_MPEG1SYSTEM_MUXER 0 ++#define CONFIG_MPEG1VCD_MUXER 0 ++#define CONFIG_MPEG1VIDEO_MUXER 0 ++#define CONFIG_MPEG2DVD_MUXER 0 ++#define CONFIG_MPEG2SVCD_MUXER 0 ++#define CONFIG_MPEG2VIDEO_MUXER 0 ++#define CONFIG_MPEG2VOB_MUXER 0 ++#define CONFIG_MPEGTS_MUXER 0 ++#define CONFIG_MPJPEG_MUXER 0 ++#define CONFIG_MXF_MUXER 0 ++#define CONFIG_MXF_D10_MUXER 0 ++#define CONFIG_MXF_OPATOM_MUXER 0 ++#define CONFIG_NULL_MUXER 0 ++#define CONFIG_NUT_MUXER 0 ++#define CONFIG_OBU_MUXER 0 ++#define CONFIG_OGA_MUXER 0 ++#define CONFIG_OGG_MUXER 0 ++#define CONFIG_OGV_MUXER 0 ++#define CONFIG_OMA_MUXER 0 ++#define CONFIG_OPUS_MUXER 0 ++#define CONFIG_PCM_ALAW_MUXER 0 ++#define CONFIG_PCM_MULAW_MUXER 0 ++#define CONFIG_PCM_VIDC_MUXER 0 ++#define CONFIG_PCM_F64BE_MUXER 0 ++#define CONFIG_PCM_F64LE_MUXER 0 ++#define CONFIG_PCM_F32BE_MUXER 0 ++#define CONFIG_PCM_F32LE_MUXER 0 ++#define CONFIG_PCM_S32BE_MUXER 0 ++#define CONFIG_PCM_S32LE_MUXER 0 ++#define CONFIG_PCM_S24BE_MUXER 0 ++#define CONFIG_PCM_S24LE_MUXER 0 ++#define CONFIG_PCM_S16BE_MUXER 0 ++#define CONFIG_PCM_S16LE_MUXER 0 ++#define CONFIG_PCM_S8_MUXER 0 ++#define CONFIG_PCM_U32BE_MUXER 0 ++#define CONFIG_PCM_U32LE_MUXER 0 ++#define CONFIG_PCM_U24BE_MUXER 0 ++#define CONFIG_PCM_U24LE_MUXER 0 ++#define CONFIG_PCM_U16BE_MUXER 0 ++#define CONFIG_PCM_U16LE_MUXER 0 ++#define CONFIG_PCM_U8_MUXER 0 ++#define CONFIG_PSP_MUXER 0 ++#define CONFIG_RAWVIDEO_MUXER 0 ++#define CONFIG_RM_MUXER 0 ++#define CONFIG_ROQ_MUXER 0 ++#define CONFIG_RSO_MUXER 0 ++#define CONFIG_RTP_MUXER 0 
++#define CONFIG_RTP_MPEGTS_MUXER 0 ++#define CONFIG_RTSP_MUXER 0 ++#define CONFIG_SAP_MUXER 0 ++#define CONFIG_SBC_MUXER 0 ++#define CONFIG_SCC_MUXER 0 ++#define CONFIG_SEGAFILM_MUXER 0 ++#define CONFIG_SEGMENT_MUXER 0 ++#define CONFIG_STREAM_SEGMENT_MUXER 0 ++#define CONFIG_SMJPEG_MUXER 0 ++#define CONFIG_SMOOTHSTREAMING_MUXER 0 ++#define CONFIG_SOX_MUXER 0 ++#define CONFIG_SPX_MUXER 0 ++#define CONFIG_SPDIF_MUXER 0 ++#define CONFIG_SRT_MUXER 0 ++#define CONFIG_STREAMHASH_MUXER 0 ++#define CONFIG_SUP_MUXER 0 ++#define CONFIG_SWF_MUXER 0 ++#define CONFIG_TEE_MUXER 0 ++#define CONFIG_TG2_MUXER 0 ++#define CONFIG_TGP_MUXER 0 ++#define CONFIG_MKVTIMESTAMP_V2_MUXER 0 ++#define CONFIG_TRUEHD_MUXER 0 ++#define CONFIG_TTA_MUXER 0 ++#define CONFIG_TTML_MUXER 0 ++#define CONFIG_UNCODEDFRAMECRC_MUXER 0 ++#define CONFIG_VC1_MUXER 0 ++#define CONFIG_VC1T_MUXER 0 ++#define CONFIG_VOC_MUXER 0 ++#define CONFIG_VVC_MUXER 0 ++#define CONFIG_W64_MUXER 0 ++#define CONFIG_WAV_MUXER 0 ++#define CONFIG_WEBM_MUXER 0 ++#define CONFIG_WEBM_DASH_MANIFEST_MUXER 0 ++#define CONFIG_WEBM_CHUNK_MUXER 0 ++#define CONFIG_WEBP_MUXER 0 ++#define CONFIG_WEBVTT_MUXER 0 ++#define CONFIG_WSAUD_MUXER 0 ++#define CONFIG_WTV_MUXER 0 ++#define CONFIG_WV_MUXER 0 ++#define CONFIG_YUV4MPEGPIPE_MUXER 0 ++#define CONFIG_CHROMAPRINT_MUXER 0 ++#define CONFIG_ASYNC_PROTOCOL 0 ++#define CONFIG_BLURAY_PROTOCOL 0 ++#define CONFIG_CACHE_PROTOCOL 0 ++#define CONFIG_CONCAT_PROTOCOL 0 ++#define CONFIG_CONCATF_PROTOCOL 0 ++#define CONFIG_CRYPTO_PROTOCOL 0 ++#define CONFIG_DATA_PROTOCOL 0 ++#define CONFIG_FD_PROTOCOL 0 ++#define CONFIG_FFRTMPCRYPT_PROTOCOL 0 ++#define CONFIG_FFRTMPHTTP_PROTOCOL 0 ++#define CONFIG_FILE_PROTOCOL 0 ++#define CONFIG_FTP_PROTOCOL 0 ++#define CONFIG_GOPHER_PROTOCOL 0 ++#define CONFIG_GOPHERS_PROTOCOL 0 ++#define CONFIG_HLS_PROTOCOL 0 ++#define CONFIG_HTTP_PROTOCOL 0 ++#define CONFIG_HTTPPROXY_PROTOCOL 0 ++#define CONFIG_HTTPS_PROTOCOL 0 ++#define CONFIG_ICECAST_PROTOCOL 0 ++#define 
CONFIG_MMSH_PROTOCOL 0 ++#define CONFIG_MMST_PROTOCOL 0 ++#define CONFIG_MD5_PROTOCOL 0 ++#define CONFIG_PIPE_PROTOCOL 0 ++#define CONFIG_PROMPEG_PROTOCOL 0 ++#define CONFIG_RTMP_PROTOCOL 0 ++#define CONFIG_RTMPE_PROTOCOL 0 ++#define CONFIG_RTMPS_PROTOCOL 0 ++#define CONFIG_RTMPT_PROTOCOL 0 ++#define CONFIG_RTMPTE_PROTOCOL 0 ++#define CONFIG_RTMPTS_PROTOCOL 0 ++#define CONFIG_RTP_PROTOCOL 0 ++#define CONFIG_SCTP_PROTOCOL 0 ++#define CONFIG_SRTP_PROTOCOL 0 ++#define CONFIG_SUBFILE_PROTOCOL 0 ++#define CONFIG_TEE_PROTOCOL 0 ++#define CONFIG_TCP_PROTOCOL 0 ++#define CONFIG_TLS_PROTOCOL 0 ++#define CONFIG_UDP_PROTOCOL 0 ++#define CONFIG_UDPLITE_PROTOCOL 0 ++#define CONFIG_UNIX_PROTOCOL 0 ++#define CONFIG_LIBAMQP_PROTOCOL 0 ++#define CONFIG_LIBRIST_PROTOCOL 0 ++#define CONFIG_LIBRTMP_PROTOCOL 0 ++#define CONFIG_LIBRTMPE_PROTOCOL 0 ++#define CONFIG_LIBRTMPS_PROTOCOL 0 ++#define CONFIG_LIBRTMPT_PROTOCOL 0 ++#define CONFIG_LIBRTMPTE_PROTOCOL 0 ++#define CONFIG_LIBSRT_PROTOCOL 0 ++#define CONFIG_LIBSSH_PROTOCOL 0 ++#define CONFIG_LIBSMBCLIENT_PROTOCOL 0 ++#define CONFIG_LIBZMQ_PROTOCOL 0 ++#define CONFIG_IPFS_GATEWAY_PROTOCOL 0 ++#define CONFIG_IPNS_GATEWAY_PROTOCOL 0 ++#endif /* FFMPEG_CONFIG_COMPONENTS_H */ +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavcodec/bsf_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavcodec/bsf_list.c +@@ -0,0 +1,2 @@ ++static const FFBitStreamFilter * const bitstream_filters[] = { ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavcodec/codec_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavcodec/codec_list.c +@@ -0,0 +1,18 @@ ++static const FFCodec * const codec_list[] = { ++ 
&ff_theora_decoder, ++ &ff_vp3_decoder, ++ &ff_vp8_decoder, ++ &ff_flac_decoder, ++ &ff_mp3_decoder, ++ &ff_vorbis_decoder, ++ &ff_pcm_alaw_decoder, ++ &ff_pcm_f32le_decoder, ++ &ff_pcm_mulaw_decoder, ++ &ff_pcm_s16be_decoder, ++ &ff_pcm_s16le_decoder, ++ &ff_pcm_s24be_decoder, ++ &ff_pcm_s24le_decoder, ++ &ff_pcm_s32le_decoder, ++ &ff_pcm_u8_decoder, ++ &ff_libopus_decoder, ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavcodec/parser_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavcodec/parser_list.c +@@ -0,0 +1,9 @@ ++static const AVCodecParser * const parser_list[] = { ++ &ff_flac_parser, ++ &ff_mpegaudio_parser, ++ &ff_opus_parser, ++ &ff_vorbis_parser, ++ &ff_vp3_parser, ++ &ff_vp8_parser, ++ &ff_vp9_parser, ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavformat/demuxer_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavformat/demuxer_list.c +@@ -0,0 +1,8 @@ ++static const AVInputFormat * const demuxer_list[] = { ++ &ff_flac_demuxer, ++ &ff_matroska_demuxer, ++ &ff_mov_demuxer, ++ &ff_mp3_demuxer, ++ &ff_ogg_demuxer, ++ &ff_wav_demuxer, ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavformat/muxer_list.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavformat/muxer_list.c +@@ -0,0 +1,2 @@ ++static const FFOutputFormat * const muxer_list[] = { ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavformat/protocol_list.c 
+=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavformat/protocol_list.c +@@ -0,0 +1,2 @@ ++static const URLProtocol * const url_protocols[] = { ++ NULL }; +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavutil/avconfig.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavutil/avconfig.h +@@ -0,0 +1,6 @@ ++/* Generated by ffmpeg configure */ ++#ifndef AVUTIL_AVCONFIG_H ++#define AVUTIL_AVCONFIG_H ++#define AV_HAVE_BIGENDIAN 0 ++#define AV_HAVE_FAST_UNALIGNED 1 ++#endif /* AVUTIL_AVCONFIG_H */ +Index: chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavutil/ffversion.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/chromium/config/Chromium/linux/ppc64/libavutil/ffversion.h +@@ -0,0 +1,5 @@ ++/* Automatically generated by version.sh, do not manually edit! 
*/ ++#ifndef AVUTIL_FFVERSION_H ++#define AVUTIL_FFVERSION_H ++#define FFMPEG_VERSION "5.1.git" ++#endif /* AVUTIL_FFVERSION_H */ +Index: chromium-120.0.6099.71/third_party/ffmpeg/ffmpeg_generated.gni +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/ffmpeg/ffmpeg_generated.gni ++++ chromium-120.0.6099.71/third_party/ffmpeg/ffmpeg_generated.gni +@@ -312,6 +312,40 @@ if ((use_linux_config && current_cpu == + ] + } + ++if (use_linux_config && current_cpu == "ppc64") { ++ ffmpeg_c_sources += [ ++ "libavutil/ppc/float_dsp_init.c", ++ "libavutil/ppc/cpu.c", ++ "libavutil/ppc/float_dsp_altivec.c", ++ "libavutil/ppc/float_dsp_vsx.c", ++ "libavcodec/ppc/audiodsp.c", ++ "libavcodec/ppc/blockdsp.c", ++ "libavcodec/ppc/fdctdsp.c", ++ "libavcodec/ppc/fmtconvert_altivec.c", ++ "libavcodec/ppc/h264dsp_ppc.c", ++ "libavcodec/ppc/h264qpel_ppc.c", ++ "libavcodec/ppc/h264chroma_init.c", ++ "libavcodec/ppc/hpeldsp_altivec.c", ++ "libavcodec/ppc/idctdsp.c", ++ "libavcodec/ppc/lossless_audiodsp_altivec.c", ++ "libavcodec/ppc/lossless_videodsp_altivec.c", ++ "libavcodec/ppc/me_cmp.c", ++ "libavcodec/ppc/mpegaudiodsp_altivec.c", ++ "libavcodec/ppc/mpegvideo_altivec.c", ++ "libavcodec/ppc/mpegvideoencdsp.c", ++ "libavcodec/ppc/pixblockdsp.c", ++ "libavcodec/ppc/svq1enc_altivec.c", ++ "libavcodec/ppc/vc1dsp_altivec.c", ++ "libavcodec/ppc/videodsp.c", ++ "libavcodec/ppc/vorbisdsp_altivec.c", ++ "libavcodec/ppc/vp3dsp_altivec.c", ++ "libavcodec/ppc/vp8dsp_altivec.c", ++ ] ++ ffmpeg_gas_sources += [ ++ "libavcodec/ppc/asm.S", ++ ] ++} ++ + if ((is_apple && current_cpu == "x64" && ffmpeg_branding == "Chrome") || (is_win && current_cpu == "x64" && ffmpeg_branding == "Chrome") || (is_win && current_cpu == "x86" && ffmpeg_branding == "Chrome") || (use_linux_config && current_cpu == "x64" && ffmpeg_branding == "Chrome") || (use_linux_config && current_cpu == "x64" && ffmpeg_branding == "ChromeOS") || (use_linux_config && current_cpu 
== "x86" && ffmpeg_branding == "Chrome") || (use_linux_config && current_cpu == "x86" && ffmpeg_branding == "ChromeOS")) { + ffmpeg_c_sources += [ + "libavcodec/x86/h264_qpel.c", +Index: chromium-120.0.6099.71/third_party/ffmpeg/libavcodec/ppc/h264dsp_ppc.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/libavcodec/ppc/h264dsp_ppc.c +@@ -0,0 +1,815 @@ ++/* ++ * Copyright (c) 2004 Romain Dolbeau ++ * ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. 
++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#include "config.h" ++ ++#include ++#include ++ ++#include "libavutil/attributes.h" ++#include "libavutil/cpu.h" ++#include "libavutil/intreadwrite.h" ++#include "libavutil/mem_internal.h" ++#include "libavutil/ppc/cpu.h" ++#include "libavutil/ppc/util_altivec.h" ++ ++#include "libavcodec/h264dec.h" ++#include "libavcodec/h264dsp.h" ++ ++#if HAVE_ALTIVEC ++ ++/**************************************************************************** ++ * IDCT transform: ++ ****************************************************************************/ ++ ++#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3) \ ++ /* 1st stage */ \ ++ vz0 = vec_add(vb0,vb2); /* temp[0] = Y[0] + Y[2] */ \ ++ vz1 = vec_sub(vb0,vb2); /* temp[1] = Y[0] - Y[2] */ \ ++ vz2 = vec_sra(vb1,vec_splat_u16(1)); \ ++ vz2 = vec_sub(vz2,vb3); /* temp[2] = Y[1].1/2 - Y[3] */ \ ++ vz3 = vec_sra(vb3,vec_splat_u16(1)); \ ++ vz3 = vec_add(vb1,vz3); /* temp[3] = Y[1] + Y[3].1/2 */ \ ++ /* 2nd stage: output */ \ ++ va0 = vec_add(vz0,vz3); /* x[0] = temp[0] + temp[3] */ \ ++ va1 = vec_add(vz1,vz2); /* x[1] = temp[1] + temp[2] */ \ ++ va2 = vec_sub(vz1,vz2); /* x[2] = temp[1] - temp[2] */ \ ++ va3 = vec_sub(vz0,vz3) /* x[3] = temp[0] - temp[3] */ ++ ++#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \ ++ b0 = vec_mergeh( a0, a0 ); \ ++ b1 = vec_mergeh( a1, a0 ); \ ++ b2 = vec_mergeh( a2, a0 ); \ ++ b3 = vec_mergeh( a3, a0 ); \ ++ a0 = vec_mergeh( b0, b2 ); \ ++ a1 = vec_mergel( b0, b2 ); \ ++ a2 = vec_mergeh( b1, b3 ); \ ++ a3 = vec_mergel( b1, b3 ); \ ++ b0 = vec_mergeh( a0, a2 ); \ ++ b1 = vec_mergel( a0, a2 ); \ ++ b2 = vec_mergeh( a1, a3 ); \ ++ b3 = vec_mergel( a1, a3 ) ++ ++#if HAVE_BIGENDIAN ++#define vdst_load(d) \ ++ vdst_orig = vec_ld(0, dst); \ ++ vdst = vec_perm(vdst_orig, 
zero_u8v, vdst_mask); ++#else ++#define vdst_load(d) vdst = vec_vsx_ld(0, dst) ++#endif ++ ++#define VEC_LOAD_U8_ADD_S16_STORE_U8(va) \ ++ vdst_load(); \ ++ vdst_ss = (vec_s16) VEC_MERGEH(zero_u8v, vdst); \ ++ va = vec_add(va, vdst_ss); \ ++ va_u8 = vec_packsu(va, zero_s16v); \ ++ va_u32 = vec_splat((vec_u32)va_u8, 0); \ ++ vec_ste(va_u32, element, (uint32_t*)dst); ++ ++static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride) ++{ ++ vec_s16 va0, va1, va2, va3; ++ vec_s16 vz0, vz1, vz2, vz3; ++ vec_s16 vtmp0, vtmp1, vtmp2, vtmp3; ++ vec_u8 va_u8; ++ vec_u32 va_u32; ++ vec_s16 vdst_ss; ++ const vec_u16 v6us = vec_splat_u16(6); ++ vec_u8 vdst, vdst_orig; ++ vec_u8 vdst_mask = vec_lvsl(0, dst); ++ int element = ((unsigned long)dst & 0xf) >> 2; ++ LOAD_ZERO; ++ ++ block[0] += 32; /* add 32 as a DC-level for rounding */ ++ ++ vtmp0 = vec_ld(0,block); ++ vtmp1 = vec_sld(vtmp0, vtmp0, 8); ++ vtmp2 = vec_ld(16,block); ++ vtmp3 = vec_sld(vtmp2, vtmp2, 8); ++ memset(block, 0, 16 * sizeof(int16_t)); ++ ++ VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3); ++ VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3); ++ VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3); ++ ++ va0 = vec_sra(va0,v6us); ++ va1 = vec_sra(va1,v6us); ++ va2 = vec_sra(va2,v6us); ++ va3 = vec_sra(va3,v6us); ++ ++ VEC_LOAD_U8_ADD_S16_STORE_U8(va0); ++ dst += stride; ++ VEC_LOAD_U8_ADD_S16_STORE_U8(va1); ++ dst += stride; ++ VEC_LOAD_U8_ADD_S16_STORE_U8(va2); ++ dst += stride; ++ VEC_LOAD_U8_ADD_S16_STORE_U8(va3); ++} ++ ++#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\ ++ /* a0 = SRC(0) + SRC(4); */ \ ++ vec_s16 a0v = vec_add(s0, s4); \ ++ /* a2 = SRC(0) - SRC(4); */ \ ++ vec_s16 a2v = vec_sub(s0, s4); \ ++ /* a4 = (SRC(2)>>1) - SRC(6); */ \ ++ vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6); \ ++ /* a6 = (SRC(6)>>1) + SRC(2); */ \ ++ vec_s16 a6v = vec_add(vec_sra(s6, onev), s2); \ ++ /* b0 = a0 + a6; */ \ ++ vec_s16 b0v = vec_add(a0v, a6v); 
\ ++ /* b2 = a2 + a4; */ \ ++ vec_s16 b2v = vec_add(a2v, a4v); \ ++ /* b4 = a2 - a4; */ \ ++ vec_s16 b4v = vec_sub(a2v, a4v); \ ++ /* b6 = a0 - a6; */ \ ++ vec_s16 b6v = vec_sub(a0v, a6v); \ ++ /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \ ++ /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \ ++ vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \ ++ /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \ ++ /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \ ++ vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\ ++ /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \ ++ /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \ ++ vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\ ++ /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \ ++ vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\ ++ /* b1 = (a7>>2) + a1; */ \ ++ vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \ ++ /* b3 = a3 + (a5>>2); */ \ ++ vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \ ++ /* b5 = (a3>>2) - a5; */ \ ++ vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \ ++ /* b7 = a7 - (a1>>2); */ \ ++ vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \ ++ /* DST(0, b0 + b7); */ \ ++ d0 = vec_add(b0v, b7v); \ ++ /* DST(1, b2 + b5); */ \ ++ d1 = vec_add(b2v, b5v); \ ++ /* DST(2, b4 + b3); */ \ ++ d2 = vec_add(b4v, b3v); \ ++ /* DST(3, b6 + b1); */ \ ++ d3 = vec_add(b6v, b1v); \ ++ /* DST(4, b6 - b1); */ \ ++ d4 = vec_sub(b6v, b1v); \ ++ /* DST(5, b4 - b3); */ \ ++ d5 = vec_sub(b4v, b3v); \ ++ /* DST(6, b2 - b5); */ \ ++ d6 = vec_sub(b2v, b5v); \ ++ /* DST(7, b0 - b7); */ \ ++ d7 = vec_sub(b0v, b7v); \ ++} ++ ++#if HAVE_BIGENDIAN ++#define GET_2PERM(ldv, stv, d) \ ++ ldv = vec_lvsl(0, d); \ ++ stv = vec_lvsr(8, d); ++#define dstv_load(d) \ ++ vec_u8 hv = vec_ld( 0, d ); \ ++ vec_u8 lv = vec_ld( 7, d); \ ++ vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv ); ++#define dest_unligned_store(d) \ ++ vec_u8 
edgehv; \ ++ vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv ); \ ++ vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv ); \ ++ lv = vec_sel( lv, bodyv, edgelv ); \ ++ vec_st( lv, 7, d ); \ ++ hv = vec_ld( 0, d ); \ ++ edgehv = vec_perm( zero_u8v, sel, perm_stv ); \ ++ hv = vec_sel( hv, bodyv, edgehv ); \ ++ vec_st( hv, 0, d ); ++#else ++ ++#define GET_2PERM(ldv, stv, d) {} ++#define dstv_load(d) vec_u8 dstv = vec_vsx_ld(0, d) ++#define dest_unligned_store(d)\ ++ vec_u8 dst8 = vec_perm((vec_u8)idstsum8, dstv, vcprm(2,3,s2,s3));\ ++ vec_vsx_st(dst8, 0, d) ++#endif /* HAVE_BIGENDIAN */ ++ ++#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \ ++ /* unaligned load */ \ ++ dstv_load(dest); \ ++ vec_s16 idct_sh6 = vec_sra(idctv, sixv); \ ++ vec_u16 dst16 = (vec_u16)VEC_MERGEH(zero_u8v, dstv); \ ++ vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16); \ ++ vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum); \ ++ /* unaligned store */ \ ++ dest_unligned_store(dest);\ ++} ++ ++static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride) ++{ ++ vec_s16 s0, s1, s2, s3, s4, s5, s6, s7; ++ vec_s16 d0, d1, d2, d3, d4, d5, d6, d7; ++ vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7; ++ ++ vec_u8 perm_ldv, perm_stv; ++ GET_2PERM(perm_ldv, perm_stv, dst); ++ ++ const vec_u16 onev = vec_splat_u16(1); ++ const vec_u16 twov = vec_splat_u16(2); ++ const vec_u16 sixv = vec_splat_u16(6); ++ ++ const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1}; ++ LOAD_ZERO; ++ ++ dct[0] += 32; // rounding for the >>6 at the end ++ ++ s0 = vec_ld(0x00, (int16_t*)dct); ++ s1 = vec_ld(0x10, (int16_t*)dct); ++ s2 = vec_ld(0x20, (int16_t*)dct); ++ s3 = vec_ld(0x30, (int16_t*)dct); ++ s4 = vec_ld(0x40, (int16_t*)dct); ++ s5 = vec_ld(0x50, (int16_t*)dct); ++ s6 = vec_ld(0x60, (int16_t*)dct); ++ s7 = vec_ld(0x70, (int16_t*)dct); ++ memset(dct, 0, 64 * sizeof(int16_t)); ++ ++ IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, ++ d0, d1, d2, d3, 
d4, d5, d6, d7); ++ ++ TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 ); ++ ++ IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7, ++ idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7); ++ ++ ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel); ++ ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel); ++ ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel); ++ ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel); ++ ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel); ++ ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel); ++ ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel); ++ ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel); ++} ++ ++#if HAVE_BIGENDIAN ++#define DST_LD vec_ld ++#else ++#define DST_LD vec_vsx_ld ++#endif ++static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, int16_t *block, int stride, int size) ++{ ++ vec_s16 dc16; ++ vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner; ++ vec_s32 v_dc32; ++ LOAD_ZERO; ++ DECLARE_ALIGNED(16, int, dc); ++ int i; ++ ++ dc = (block[0] + 32) >> 6; ++ block[0] = 0; ++ v_dc32 = vec_lde(0, &dc); ++ dc16 = VEC_SPLAT16((vec_s16)v_dc32, 1); ++ ++ if (size == 4) ++ dc16 = VEC_SLD16(dc16, zero_s16v, 8); ++ dcplus = vec_packsu(dc16, zero_s16v); ++ dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v); ++ ++#if HAVE_BIGENDIAN ++ aligner = vec_lvsr(0, dst); ++ dcplus = vec_perm(dcplus, dcplus, aligner); ++ dcminus = vec_perm(dcminus, dcminus, aligner); ++#endif ++ ++ for (i = 0; i < size; i += 4) { ++ v0 = DST_LD(0, dst+0*stride); ++ v1 = DST_LD(0, dst+1*stride); ++ v2 = DST_LD(0, dst+2*stride); ++ v3 = DST_LD(0, dst+3*stride); ++ ++ v0 = vec_adds(v0, dcplus); ++ v1 = vec_adds(v1, dcplus); ++ v2 = vec_adds(v2, dcplus); ++ v3 = vec_adds(v3, dcplus); ++ ++ v0 = vec_subs(v0, dcminus); ++ v1 = vec_subs(v1, dcminus); ++ v2 = vec_subs(v2, dcminus); ++ v3 = vec_subs(v3, 
dcminus); ++ ++ VEC_ST(v0, 0, dst+0*stride); ++ VEC_ST(v1, 0, dst+1*stride); ++ VEC_ST(v2, 0, dst+2*stride); ++ VEC_ST(v3, 0, dst+3*stride); ++ ++ dst += 4*stride; ++ } ++} ++ ++static void h264_idct_dc_add_altivec(uint8_t *dst, int16_t *block, int stride) ++{ ++ h264_idct_dc_add_internal(dst, block, stride, 4); ++} ++ ++static void h264_idct8_dc_add_altivec(uint8_t *dst, int16_t *block, int stride) ++{ ++ h264_idct_dc_add_internal(dst, block, stride, 8); ++} ++ ++static void h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, ++ int16_t *block, int stride, ++ const uint8_t nnzc[5 * 8]) ++{ ++ int i; ++ for(i=0; i<16; i++){ ++ int nnz = nnzc[ scan8[i] ]; ++ if(nnz){ ++ if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride); ++ else h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride); ++ } ++ } ++} ++ ++static void h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, ++ int16_t *block, int stride, ++ const uint8_t nnzc[5 * 8]) ++{ ++ int i; ++ for(i=0; i<16; i++){ ++ if(nnzc[ scan8[i] ]) h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride); ++ else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride); ++ } ++} ++ ++static void h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, ++ int16_t *block, int stride, ++ const uint8_t nnzc[5 * 8]) ++{ ++ int i; ++ for(i=0; i<16; i+=4){ ++ int nnz = nnzc[ scan8[i] ]; ++ if(nnz){ ++ if(nnz==1 && block[i*16]) h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride); ++ else h264_idct8_add_altivec(dst + block_offset[i], block + i*16, stride); ++ } ++ } ++} ++ ++static void h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, ++ int16_t *block, int stride, ++ const uint8_t nnzc[15 * 8]) ++{ ++ int i, j; ++ for (j = 1; j < 3; j++) { ++ for(i = j * 16; i < j * 16 + 4; i++){ ++ if(nnzc[ scan8[i] ]) ++ h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, 
stride); ++ else if(block[i*16]) ++ h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride); ++ } ++ } ++} ++ ++#define transpose4x16(r0, r1, r2, r3) { \ ++ register vec_u8 r4; \ ++ register vec_u8 r5; \ ++ register vec_u8 r6; \ ++ register vec_u8 r7; \ ++ \ ++ r4 = vec_mergeh(r0, r2); /*0, 2 set 0*/ \ ++ r5 = vec_mergel(r0, r2); /*0, 2 set 1*/ \ ++ r6 = vec_mergeh(r1, r3); /*1, 3 set 0*/ \ ++ r7 = vec_mergel(r1, r3); /*1, 3 set 1*/ \ ++ \ ++ r0 = vec_mergeh(r4, r6); /*all set 0*/ \ ++ r1 = vec_mergel(r4, r6); /*all set 1*/ \ ++ r2 = vec_mergeh(r5, r7); /*all set 2*/ \ ++ r3 = vec_mergel(r5, r7); /*all set 3*/ \ ++} ++ ++static inline void write16x4(uint8_t *dst, int dst_stride, ++ register vec_u8 r0, register vec_u8 r1, ++ register vec_u8 r2, register vec_u8 r3) { ++ DECLARE_ALIGNED(16, unsigned char, result)[64]; ++ uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst; ++ int int_dst_stride = dst_stride/4; ++ ++ vec_st(r0, 0, result); ++ vec_st(r1, 16, result); ++ vec_st(r2, 32, result); ++ vec_st(r3, 48, result); ++ /* FIXME: there has to be a better way!!!! 
*/ ++ *dst_int = *src_int; ++ *(dst_int+ int_dst_stride) = *(src_int + 1); ++ *(dst_int+ 2*int_dst_stride) = *(src_int + 2); ++ *(dst_int+ 3*int_dst_stride) = *(src_int + 3); ++ *(dst_int+ 4*int_dst_stride) = *(src_int + 4); ++ *(dst_int+ 5*int_dst_stride) = *(src_int + 5); ++ *(dst_int+ 6*int_dst_stride) = *(src_int + 6); ++ *(dst_int+ 7*int_dst_stride) = *(src_int + 7); ++ *(dst_int+ 8*int_dst_stride) = *(src_int + 8); ++ *(dst_int+ 9*int_dst_stride) = *(src_int + 9); ++ *(dst_int+10*int_dst_stride) = *(src_int + 10); ++ *(dst_int+11*int_dst_stride) = *(src_int + 11); ++ *(dst_int+12*int_dst_stride) = *(src_int + 12); ++ *(dst_int+13*int_dst_stride) = *(src_int + 13); ++ *(dst_int+14*int_dst_stride) = *(src_int + 14); ++ *(dst_int+15*int_dst_stride) = *(src_int + 15); ++} ++ ++/** @brief performs a 6x16 transpose of data in src, and stores it to dst ++ @todo FIXME: see if we can't spare some vec_lvsl() by them factorizing ++ out of unaligned_load() */ ++#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\ ++ register vec_u8 r0 = unaligned_load(0, src); \ ++ register vec_u8 r1 = unaligned_load( src_stride, src); \ ++ register vec_u8 r2 = unaligned_load(2* src_stride, src); \ ++ register vec_u8 r3 = unaligned_load(3* src_stride, src); \ ++ register vec_u8 r4 = unaligned_load(4* src_stride, src); \ ++ register vec_u8 r5 = unaligned_load(5* src_stride, src); \ ++ register vec_u8 r6 = unaligned_load(6* src_stride, src); \ ++ register vec_u8 r7 = unaligned_load(7* src_stride, src); \ ++ register vec_u8 r14 = unaligned_load(14*src_stride, src); \ ++ register vec_u8 r15 = unaligned_load(15*src_stride, src); \ ++ \ ++ r8 = unaligned_load( 8*src_stride, src); \ ++ r9 = unaligned_load( 9*src_stride, src); \ ++ r10 = unaligned_load(10*src_stride, src); \ ++ r11 = unaligned_load(11*src_stride, src); \ ++ r12 = unaligned_load(12*src_stride, src); \ ++ r13 = unaligned_load(13*src_stride, src); \ ++ \ ++ /*Merge first pairs*/ \ ++ r0 = vec_mergeh(r0, r8); 
/*0, 8*/ \ ++ r1 = vec_mergeh(r1, r9); /*1, 9*/ \ ++ r2 = vec_mergeh(r2, r10); /*2,10*/ \ ++ r3 = vec_mergeh(r3, r11); /*3,11*/ \ ++ r4 = vec_mergeh(r4, r12); /*4,12*/ \ ++ r5 = vec_mergeh(r5, r13); /*5,13*/ \ ++ r6 = vec_mergeh(r6, r14); /*6,14*/ \ ++ r7 = vec_mergeh(r7, r15); /*7,15*/ \ ++ \ ++ /*Merge second pairs*/ \ ++ r8 = vec_mergeh(r0, r4); /*0,4, 8,12 set 0*/ \ ++ r9 = vec_mergel(r0, r4); /*0,4, 8,12 set 1*/ \ ++ r10 = vec_mergeh(r1, r5); /*1,5, 9,13 set 0*/ \ ++ r11 = vec_mergel(r1, r5); /*1,5, 9,13 set 1*/ \ ++ r12 = vec_mergeh(r2, r6); /*2,6,10,14 set 0*/ \ ++ r13 = vec_mergel(r2, r6); /*2,6,10,14 set 1*/ \ ++ r14 = vec_mergeh(r3, r7); /*3,7,11,15 set 0*/ \ ++ r15 = vec_mergel(r3, r7); /*3,7,11,15 set 1*/ \ ++ \ ++ /*Third merge*/ \ ++ r0 = vec_mergeh(r8, r12); /*0,2,4,6,8,10,12,14 set 0*/ \ ++ r1 = vec_mergel(r8, r12); /*0,2,4,6,8,10,12,14 set 1*/ \ ++ r2 = vec_mergeh(r9, r13); /*0,2,4,6,8,10,12,14 set 2*/ \ ++ r4 = vec_mergeh(r10, r14); /*1,3,5,7,9,11,13,15 set 0*/ \ ++ r5 = vec_mergel(r10, r14); /*1,3,5,7,9,11,13,15 set 1*/ \ ++ r6 = vec_mergeh(r11, r15); /*1,3,5,7,9,11,13,15 set 2*/ \ ++ /* Don't need to compute 3 and 7*/ \ ++ \ ++ /*Final merge*/ \ ++ r8 = vec_mergeh(r0, r4); /*all set 0*/ \ ++ r9 = vec_mergel(r0, r4); /*all set 1*/ \ ++ r10 = vec_mergeh(r1, r5); /*all set 2*/ \ ++ r11 = vec_mergel(r1, r5); /*all set 3*/ \ ++ r12 = vec_mergeh(r2, r6); /*all set 4*/ \ ++ r13 = vec_mergel(r2, r6); /*all set 5*/ \ ++ /* Don't need to compute 14 and 15*/ \ ++ \ ++} ++ ++// out: o = |x-y| < a ++static inline vec_u8 diff_lt_altivec ( register vec_u8 x, ++ register vec_u8 y, ++ register vec_u8 a) { ++ ++ register vec_u8 diff = vec_subs(x, y); ++ register vec_u8 diffneg = vec_subs(y, x); ++ register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */ ++ o = (vec_u8)vec_cmplt(o, a); ++ return o; ++} ++ ++static inline vec_u8 h264_deblock_mask ( register vec_u8 p0, ++ register vec_u8 p1, ++ register vec_u8 q0, ++ register vec_u8 q1, ++ register vec_u8 alpha, ++ 
register vec_u8 beta) { ++ ++ register vec_u8 mask; ++ register vec_u8 tempmask; ++ ++ mask = diff_lt_altivec(p0, q0, alpha); ++ tempmask = diff_lt_altivec(p1, p0, beta); ++ mask = vec_and(mask, tempmask); ++ tempmask = diff_lt_altivec(q1, q0, beta); ++ mask = vec_and(mask, tempmask); ++ ++ return mask; ++} ++ ++// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0) ++static inline vec_u8 h264_deblock_q1(register vec_u8 p0, ++ register vec_u8 p1, ++ register vec_u8 p2, ++ register vec_u8 q0, ++ register vec_u8 tc0) { ++ ++ register vec_u8 average = vec_avg(p0, q0); ++ register vec_u8 temp; ++ register vec_u8 unclipped; ++ register vec_u8 ones; ++ register vec_u8 max; ++ register vec_u8 min; ++ register vec_u8 newp1; ++ ++ temp = vec_xor(average, p2); ++ average = vec_avg(average, p2); /*avg(p2, avg(p0, q0)) */ ++ ones = vec_splat_u8(1); ++ temp = vec_and(temp, ones); /*(p2^avg(p0, q0)) & 1 */ ++ unclipped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */ ++ max = vec_adds(p1, tc0); ++ min = vec_subs(p1, tc0); ++ newp1 = vec_max(min, unclipped); ++ newp1 = vec_min(max, newp1); ++ return newp1; ++} ++ ++#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) { \ ++ \ ++ const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4)); \ ++ \ ++ register vec_u8 pq0bit = vec_xor(p0,q0); \ ++ register vec_u8 q1minus; \ ++ register vec_u8 p0minus; \ ++ register vec_u8 stage1; \ ++ register vec_u8 stage2; \ ++ register vec_u8 vec160; \ ++ register vec_u8 delta; \ ++ register vec_u8 deltaneg; \ ++ \ ++ q1minus = vec_nor(q1, q1); /* 255 - q1 */ \ ++ stage1 = vec_avg(p1, q1minus); /* (p1 - q1 + 256)>>1 */ \ ++ stage2 = vec_sr(stage1, vec_splat_u8(1)); /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */ \ ++ p0minus = vec_nor(p0, p0); /* 255 - p0 */ \ ++ stage1 = vec_avg(q0, p0minus); /* (q0 - p0 + 256)>>1 */ \ ++ pq0bit = vec_and(pq0bit, vec_splat_u8(1)); \ ++ stage2 = vec_avg(stage2, pq0bit); /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \ ++ stage2 = 
vec_adds(stage2, stage1); /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */ \ ++ vec160 = vec_ld(0, &A0v); \ ++ deltaneg = vec_subs(vec160, stage2); /* -d */ \ ++ delta = vec_subs(stage2, vec160); /* d */ \ ++ deltaneg = vec_min(tc0masked, deltaneg); \ ++ delta = vec_min(tc0masked, delta); \ ++ p0 = vec_subs(p0, deltaneg); \ ++ q0 = vec_subs(q0, delta); \ ++ p0 = vec_adds(p0, delta); \ ++ q0 = vec_adds(q0, deltaneg); \ ++} ++ ++#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) { \ ++ DECLARE_ALIGNED(16, unsigned char, temp)[16]; \ ++ register vec_u8 alphavec; \ ++ register vec_u8 betavec; \ ++ register vec_u8 mask; \ ++ register vec_u8 p1mask; \ ++ register vec_u8 q1mask; \ ++ register vector signed char tc0vec; \ ++ register vec_u8 finaltc0; \ ++ register vec_u8 tc0masked; \ ++ register vec_u8 newp1; \ ++ register vec_u8 newq1; \ ++ \ ++ temp[0] = alpha; \ ++ temp[1] = beta; \ ++ alphavec = vec_ld(0, temp); \ ++ betavec = vec_splat(alphavec, 0x1); \ ++ alphavec = vec_splat(alphavec, 0x0); \ ++ mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */ \ ++ \ ++ AV_COPY32(temp, tc0); \ ++ tc0vec = vec_ld(0, (signed char*)temp); \ ++ tc0vec = vec_mergeh(tc0vec, tc0vec); \ ++ tc0vec = vec_mergeh(tc0vec, tc0vec); \ ++ mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1))); /* if tc0[i] >= 0 */ \ ++ finaltc0 = vec_and((vec_u8)tc0vec, mask); /* tc = tc0 */ \ ++ \ ++ p1mask = diff_lt_altivec(p2, p0, betavec); \ ++ p1mask = vec_and(p1mask, mask); /* if ( |p2 - p0| < beta) */ \ ++ tc0masked = vec_and(p1mask, (vec_u8)tc0vec); \ ++ finaltc0 = vec_sub(finaltc0, p1mask); /* tc++ */ \ ++ newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked); \ ++ /*end if*/ \ ++ \ ++ q1mask = diff_lt_altivec(q2, q0, betavec); \ ++ q1mask = vec_and(q1mask, mask); /* if ( |q2 - q0| < beta ) */\ ++ tc0masked = vec_and(q1mask, (vec_u8)tc0vec); \ ++ finaltc0 = vec_sub(finaltc0, q1mask); /* tc++ */ \ ++ newq1 = h264_deblock_q1(p0, q1, q2, q0, 
tc0masked); \ ++ /*end if*/ \ ++ \ ++ h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0); \ ++ p1 = newp1; \ ++ q1 = newq1; \ ++} ++ ++static void h264_v_loop_filter_luma_altivec(uint8_t *pix, ptrdiff_t stride, int alpha, int beta, int8_t *tc0) { ++ ++ if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) { ++ register vec_u8 p2 = vec_ld(-3*stride, pix); ++ register vec_u8 p1 = vec_ld(-2*stride, pix); ++ register vec_u8 p0 = vec_ld(-1*stride, pix); ++ register vec_u8 q0 = vec_ld(0, pix); ++ register vec_u8 q1 = vec_ld(stride, pix); ++ register vec_u8 q2 = vec_ld(2*stride, pix); ++ h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0); ++ vec_st(p1, -2*stride, pix); ++ vec_st(p0, -1*stride, pix); ++ vec_st(q0, 0, pix); ++ vec_st(q1, stride, pix); ++ } ++} ++ ++static void h264_h_loop_filter_luma_altivec(uint8_t *pix, ptrdiff_t stride, int alpha, int beta, int8_t *tc0) { ++ ++ register vec_u8 line0, line1, line2, line3, line4, line5; ++ if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0) ++ return; ++ readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5); ++ h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0); ++ transpose4x16(line1, line2, line3, line4); ++ write16x4(pix-2, stride, line1, line2, line3, line4); ++} ++ ++static av_always_inline ++void weight_h264_W_altivec(uint8_t *block, int stride, int height, ++ int log2_denom, int weight, int offset, int w) ++{ ++ int y, aligned; ++ vec_u8 vblock; ++ vec_s16 vtemp, vweight, voffset, v0, v1; ++ vec_u16 vlog2_denom; ++ DECLARE_ALIGNED(16, int32_t, temp)[4]; ++ LOAD_ZERO; ++ ++ offset <<= log2_denom; ++ if(log2_denom) offset += 1<<(log2_denom-1); ++ temp[0] = log2_denom; ++ temp[1] = weight; ++ temp[2] = offset; ++ ++ vtemp = (vec_s16)vec_ld(0, temp); ++#if !HAVE_BIGENDIAN ++ vtemp =(vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3)); ++#endif ++ vlog2_denom = (vec_u16)vec_splat(vtemp, 1); ++ vweight = vec_splat(vtemp, 3); ++ voffset = vec_splat(vtemp, 5); ++ 
aligned = !((unsigned long)block & 0xf); ++ ++ for (y = 0; y < height; y++) { ++ vblock = vec_ld(0, block); ++ ++ v0 = (vec_s16)VEC_MERGEH(zero_u8v, vblock); ++ v1 = (vec_s16)VEC_MERGEL(zero_u8v, vblock); ++ ++ if (w == 16 || aligned) { ++ v0 = vec_mladd(v0, vweight, zero_s16v); ++ v0 = vec_adds(v0, voffset); ++ v0 = vec_sra(v0, vlog2_denom); ++ } ++ if (w == 16 || !aligned) { ++ v1 = vec_mladd(v1, vweight, zero_s16v); ++ v1 = vec_adds(v1, voffset); ++ v1 = vec_sra(v1, vlog2_denom); ++ } ++ vblock = vec_packsu(v0, v1); ++ vec_st(vblock, 0, block); ++ ++ block += stride; ++ } ++} ++ ++static av_always_inline ++void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height, ++ int log2_denom, int weightd, int weights, int offset, int w) ++{ ++ int y, dst_aligned, src_aligned; ++ vec_u8 vsrc, vdst; ++ vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3; ++ vec_u16 vlog2_denom; ++ DECLARE_ALIGNED(16, int32_t, temp)[4]; ++ LOAD_ZERO; ++ ++ offset = ((offset + 1) | 1) << log2_denom; ++ temp[0] = log2_denom+1; ++ temp[1] = weights; ++ temp[2] = weightd; ++ temp[3] = offset; ++ ++ vtemp = (vec_s16)vec_ld(0, temp); ++#if !HAVE_BIGENDIAN ++ vtemp =(vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3)); ++#endif ++ vlog2_denom = (vec_u16)vec_splat(vtemp, 1); ++ vweights = vec_splat(vtemp, 3); ++ vweightd = vec_splat(vtemp, 5); ++ voffset = vec_splat(vtemp, 7); ++ dst_aligned = !((unsigned long)dst & 0xf); ++ src_aligned = !((unsigned long)src & 0xf); ++ ++ for (y = 0; y < height; y++) { ++ vdst = vec_ld(0, dst); ++ vsrc = vec_ld(0, src); ++ ++ v0 = (vec_s16)VEC_MERGEH(zero_u8v, vdst); ++ v1 = (vec_s16)VEC_MERGEL(zero_u8v, vdst); ++ v2 = (vec_s16)VEC_MERGEH(zero_u8v, vsrc); ++ v3 = (vec_s16)VEC_MERGEL(zero_u8v, vsrc); ++ ++ if (w == 8) { ++ if (src_aligned) ++ v3 = v2; ++ else ++ v2 = v3; ++ } ++ ++ if (w == 16 || dst_aligned) { ++ v0 = vec_mladd(v0, vweightd, zero_s16v); ++ v2 = vec_mladd(v2, vweights, zero_s16v); ++ ++ v0 = vec_adds(v0, voffset); ++ v0 = 
vec_adds(v0, v2); ++ v0 = vec_sra(v0, vlog2_denom); ++ } ++ if (w == 16 || !dst_aligned) { ++ v1 = vec_mladd(v1, vweightd, zero_s16v); ++ v3 = vec_mladd(v3, vweights, zero_s16v); ++ ++ v1 = vec_adds(v1, voffset); ++ v1 = vec_adds(v1, v3); ++ v1 = vec_sra(v1, vlog2_denom); ++ } ++ vdst = vec_packsu(v0, v1); ++ vec_st(vdst, 0, dst); ++ ++ dst += stride; ++ src += stride; ++ } ++} ++ ++#define H264_WEIGHT(W) \ ++static void weight_h264_pixels ## W ## _altivec(uint8_t *block, ptrdiff_t stride, int height, \ ++ int log2_denom, int weight, int offset) \ ++{ \ ++ weight_h264_W_altivec(block, stride, height, log2_denom, weight, offset, W); \ ++}\ ++static void biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int height, \ ++ int log2_denom, int weightd, int weights, int offset) \ ++{ \ ++ biweight_h264_W_altivec(dst, src, stride, height, log2_denom, weightd, weights, offset, W); \ ++} ++ ++H264_WEIGHT(16) ++H264_WEIGHT( 8) ++#endif /* HAVE_ALTIVEC */ ++ ++av_cold void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth, ++ const int chroma_format_idc) ++{ ++#if HAVE_ALTIVEC ++ if (!PPC_ALTIVEC(av_get_cpu_flags())) ++ return; ++ ++ if (bit_depth == 8) { ++ c->h264_idct_add = h264_idct_add_altivec; ++ if (chroma_format_idc <= 1) ++ c->h264_idct_add8 = h264_idct_add8_altivec; ++ c->h264_idct_add16 = h264_idct_add16_altivec; ++ c->h264_idct_add16intra = h264_idct_add16intra_altivec; ++ c->h264_idct_dc_add= h264_idct_dc_add_altivec; ++ c->h264_idct8_dc_add = h264_idct8_dc_add_altivec; ++ c->h264_idct8_add = h264_idct8_add_altivec; ++ c->h264_idct8_add4 = h264_idct8_add4_altivec; ++ c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_altivec; ++ c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_altivec; ++ ++ c->weight_h264_pixels_tab[0] = weight_h264_pixels16_altivec; ++ c->weight_h264_pixels_tab[1] = weight_h264_pixels8_altivec; ++ c->biweight_h264_pixels_tab[0] = biweight_h264_pixels16_altivec; ++ c->biweight_h264_pixels_tab[1] = 
biweight_h264_pixels8_altivec; ++ } ++#endif /* HAVE_ALTIVEC */ ++} +Index: chromium-120.0.6099.71/third_party/ffmpeg/libavcodec/ppc/h264qpel_ppc.c +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/ffmpeg/libavcodec/ppc/h264qpel_ppc.c +@@ -0,0 +1,315 @@ ++/* ++ * Copyright (c) 2004 Romain Dolbeau ++ * ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#include "config.h" ++ ++#include "libavutil/attributes.h" ++#include "libavutil/cpu.h" ++#include "libavutil/intreadwrite.h" ++#include "libavutil/mem_internal.h" ++#include "libavutil/ppc/cpu.h" ++#include "libavutil/ppc/util_altivec.h" ++ ++#include "libavcodec/h264qpel.h" ++ ++#include "hpeldsp_altivec.h" ++ ++#if HAVE_ALTIVEC ++ ++#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s ++#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s) ++ ++#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC ++#define PREFIX_h264_qpel16_h_lowpass_altivec put_h264_qpel16_h_lowpass_altivec ++#define PREFIX_h264_qpel16_h_lowpass_num altivec_put_h264_qpel16_h_lowpass_num ++#define PREFIX_h264_qpel16_v_lowpass_altivec put_h264_qpel16_v_lowpass_altivec ++#define PREFIX_h264_qpel16_v_lowpass_num altivec_put_h264_qpel16_v_lowpass_num ++#define 
PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec ++#define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num ++#include "h264qpel_template.c" ++#undef OP_U8_ALTIVEC ++#undef PREFIX_h264_qpel16_h_lowpass_altivec ++#undef PREFIX_h264_qpel16_h_lowpass_num ++#undef PREFIX_h264_qpel16_v_lowpass_altivec ++#undef PREFIX_h264_qpel16_v_lowpass_num ++#undef PREFIX_h264_qpel16_hv_lowpass_altivec ++#undef PREFIX_h264_qpel16_hv_lowpass_num ++ ++#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC ++#define PREFIX_h264_qpel16_h_lowpass_altivec avg_h264_qpel16_h_lowpass_altivec ++#define PREFIX_h264_qpel16_h_lowpass_num altivec_avg_h264_qpel16_h_lowpass_num ++#define PREFIX_h264_qpel16_v_lowpass_altivec avg_h264_qpel16_v_lowpass_altivec ++#define PREFIX_h264_qpel16_v_lowpass_num altivec_avg_h264_qpel16_v_lowpass_num ++#define PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec ++#define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num ++#include "h264qpel_template.c" ++#undef OP_U8_ALTIVEC ++#undef PREFIX_h264_qpel16_h_lowpass_altivec ++#undef PREFIX_h264_qpel16_h_lowpass_num ++#undef PREFIX_h264_qpel16_v_lowpass_altivec ++#undef PREFIX_h264_qpel16_v_lowpass_num ++#undef PREFIX_h264_qpel16_hv_lowpass_altivec ++#undef PREFIX_h264_qpel16_hv_lowpass_num ++ ++#define H264_MC(OPNAME, SIZE, CODETYPE) \ ++static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ ff_ ## OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{ \ ++ DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\ ++ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ ++ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## 
CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\ ++ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ ++ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\ ++ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ ++ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\ ++ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ ++ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\ ++ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\ ++ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\ ++ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\ ++ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, 
const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\ ++ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\ ++ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\ ++ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\ ++ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\ ++ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\ ++ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\ ++ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\ ++ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\ ++ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\ ++ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\ ++ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\ ++ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\ ++ OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\ ++ DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\ ++ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\ ++ put_h264_qpel ## SIZE ## _h_lowpass_ 
## CODETYPE(halfH, src, SIZE, stride);\ ++ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\ ++ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\ ++ DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\ ++ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\ ++ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\ ++ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\ ++ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\ ++ DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\ ++ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\ ++ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\ ++ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\ ++ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\ ++}\ ++\ ++static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\ ++{\ ++ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\ ++ DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\ ++ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\ ++ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\ ++ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\ ++ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\ ++}\ ++ ++#if HAVE_BIGENDIAN ++#define put_unligned_store(s, dest) { \ ++ tmp1 = vec_ld(0, dest); \ ++ 
mask = vec_lvsl(0, dest); \ ++ tmp2 = vec_ld(15, dest); \ ++ edges = vec_perm(tmp2, tmp1, mask); \ ++ align = vec_lvsr(0, dest); \ ++ tmp2 = vec_perm(s, edges, align); \ ++ tmp1 = vec_perm(edges, s, align); \ ++ vec_st(tmp2, 15, dest); \ ++ vec_st(tmp1, 0 , dest); \ ++ } ++#else ++#define put_unligned_store(s, dest) vec_vsx_st(s, 0, dest); ++#endif /* HAVE_BIGENDIAN */ ++ ++static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1, ++ const uint8_t * src2, int dst_stride, ++ int src_stride1, int h) ++{ ++ int i; ++ vec_u8 a, b, d, mask_; ++#if HAVE_BIGENDIAN ++ vec_u8 tmp1, tmp2, mask, edges, align; ++ mask_ = vec_lvsl(0, src2); ++#endif ++ ++ for (i = 0; i < h; i++) { ++ a = unaligned_load(i * src_stride1, src1); ++ b = load_with_perm_vec(i * 16, src2, mask_); ++ d = vec_avg(a, b); ++ put_unligned_store(d, dst); ++ dst += dst_stride; ++ } ++} ++ ++#if HAVE_BIGENDIAN ++#define avg_unligned_store(s, dest){ \ ++ tmp1 = vec_ld(0, dest); \ ++ mask = vec_lvsl(0, dest); \ ++ tmp2 = vec_ld(15, dest); \ ++ a = vec_avg(vec_perm(tmp1, tmp2, mask), s); \ ++ edges = vec_perm(tmp2, tmp1, mask); \ ++ align = vec_lvsr(0, dest); \ ++ tmp2 = vec_perm(a, edges, align); \ ++ tmp1 = vec_perm(edges, a, align); \ ++ vec_st(tmp2, 15, dest); \ ++ vec_st(tmp1, 0 , dest); \ ++ } ++#else ++#define avg_unligned_store(s, dest){ \ ++ a = vec_avg(vec_vsx_ld(0, dst), s); \ ++ vec_vsx_st(a, 0, dst); \ ++ } ++#endif /* HAVE_BIGENDIAN */ ++ ++static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1, ++ const uint8_t * src2, int dst_stride, ++ int src_stride1, int h) ++{ ++ int i; ++ vec_u8 a, b, d, mask_; ++ ++#if HAVE_BIGENDIAN ++ vec_u8 tmp1, tmp2, mask, edges, align; ++ mask_ = vec_lvsl(0, src2); ++#endif ++ ++ for (i = 0; i < h; i++) { ++ a = unaligned_load(i * src_stride1, src1); ++ b = load_with_perm_vec(i * 16, src2, mask_); ++ d = vec_avg(a, b); ++ avg_unligned_store(d, dst); ++ dst += dst_stride; ++ } ++} ++ ++/* Implemented but could be faster 
++#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h) ++#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h) ++ */ ++ ++H264_MC(put_, 16, altivec) ++H264_MC(avg_, 16, altivec) ++#endif /* HAVE_ALTIVEC */ ++ ++av_cold void ff_h264qpel_init_ppc(H264QpelContext *c, int bit_depth) ++{ ++#if HAVE_ALTIVEC ++ const int high_bit_depth = bit_depth > 8; ++ ++ if (!PPC_ALTIVEC(av_get_cpu_flags())) ++ return; ++ ++ if (!high_bit_depth) { ++#define dspfunc(PFX, IDX, NUM) \ ++ c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \ ++ c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \ ++ c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \ ++ c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \ ++ c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \ ++ c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \ ++ c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \ ++ c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \ ++ c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \ ++ c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \ ++ c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \ ++ c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \ ++ c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \ ++ c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \ ++ c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \ ++ c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec ++ ++ dspfunc(put_h264_qpel, 0, 16); ++ dspfunc(avg_h264_qpel, 0, 16); ++#undef dspfunc ++ } ++#endif /* HAVE_ALTIVEC */ ++} diff --git a/0003-third_party-libvpx-Add-ppc64-generated-config.patch b/0003-third_party-libvpx-Add-ppc64-generated-config.patch new file mode 100644 index 0000000..d89d4c3 --- /dev/null +++ b/0003-third_party-libvpx-Add-ppc64-generated-config.patch @@ -0,0 +1,5075 @@ 
+Index: chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vp8_rtcd.h +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vp8_rtcd.h +@@ -0,0 +1,316 @@ ++// This file is generated. Do not edit. ++#ifndef VP8_RTCD_H_ ++#define VP8_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++/* ++ * VP8 ++ */ ++ ++struct blockd; ++struct macroblockd; ++struct loop_filter_info; ++ ++/* Encoder forward decls */ ++struct block; ++struct macroblock; ++struct variance_vtable; ++union int_mv; ++struct yv12_buffer_config; ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++void vp8_bilinear_predict16x16_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_c ++ ++void vp8_bilinear_predict4x4_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_c ++ ++void vp8_bilinear_predict8x4_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_c ++ ++void vp8_bilinear_predict8x8_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_c ++ ++int vp8_block_error_c(short* coeff, short* dqcoeff); ++#define vp8_block_error vp8_block_error_c ++ ++void vp8_copy32xn_c(const unsigned char* src_ptr, ++ int src_stride, ++ unsigned char* dst_ptr, ++ int dst_stride, ++ int height); ++#define vp8_copy32xn vp8_copy32xn_c ++ ++void vp8_copy_mem16x16_c(unsigned char* src, ++ int src_stride, ++ unsigned 
char* dst, ++ int dst_stride); ++#define vp8_copy_mem16x16 vp8_copy_mem16x16_c ++ ++void vp8_copy_mem8x4_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride); ++#define vp8_copy_mem8x4 vp8_copy_mem8x4_c ++ ++void vp8_copy_mem8x8_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride); ++#define vp8_copy_mem8x8 vp8_copy_mem8x8_c ++ ++void vp8_dc_only_idct_add_c(short input_dc, ++ unsigned char* pred_ptr, ++ int pred_stride, ++ unsigned char* dst_ptr, ++ int dst_stride); ++#define vp8_dc_only_idct_add vp8_dc_only_idct_add_c ++ ++int vp8_denoiser_filter_c(unsigned char* mc_running_avg_y, ++ int mc_avg_y_stride, ++ unsigned char* running_avg_y, ++ int avg_y_stride, ++ unsigned char* sig, ++ int sig_stride, ++ unsigned int motion_magnitude, ++ int increase_denoising); ++#define vp8_denoiser_filter vp8_denoiser_filter_c ++ ++int vp8_denoiser_filter_uv_c(unsigned char* mc_running_avg, ++ int mc_avg_stride, ++ unsigned char* running_avg, ++ int avg_stride, ++ unsigned char* sig, ++ int sig_stride, ++ unsigned int motion_magnitude, ++ int increase_denoising); ++#define vp8_denoiser_filter_uv vp8_denoiser_filter_uv_c ++ ++void vp8_dequant_idct_add_c(short* input, ++ short* dq, ++ unsigned char* dest, ++ int stride); ++#define vp8_dequant_idct_add vp8_dequant_idct_add_c ++ ++void vp8_dequant_idct_add_uv_block_c(short* q, ++ short* dq, ++ unsigned char* dst_u, ++ unsigned char* dst_v, ++ int stride, ++ char* eobs); ++#define vp8_dequant_idct_add_uv_block vp8_dequant_idct_add_uv_block_c ++ ++void vp8_dequant_idct_add_y_block_c(short* q, ++ short* dq, ++ unsigned char* dst, ++ int stride, ++ char* eobs); ++#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_c ++ ++void vp8_dequantize_b_c(struct blockd*, short* DQC); ++#define vp8_dequantize_b vp8_dequantize_b_c ++ ++int vp8_diamond_search_sad_c(struct macroblock* x, ++ struct block* b, ++ struct blockd* d, ++ union int_mv* ref_mv, ++ union int_mv* best_mv, ++ 
int search_param, ++ int sad_per_bit, ++ int* num00, ++ struct variance_vtable* fn_ptr, ++ int* mvcost[2], ++ union int_mv* center_mv); ++#define vp8_diamond_search_sad vp8_diamond_search_sad_c ++ ++void vp8_fast_quantize_b_c(struct block*, struct blockd*); ++#define vp8_fast_quantize_b vp8_fast_quantize_b_c ++ ++void vp8_filter_by_weight16x16_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride, ++ int src_weight); ++#define vp8_filter_by_weight16x16 vp8_filter_by_weight16x16_c ++ ++void vp8_filter_by_weight4x4_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride, ++ int src_weight); ++#define vp8_filter_by_weight4x4 vp8_filter_by_weight4x4_c ++ ++void vp8_filter_by_weight8x8_c(unsigned char* src, ++ int src_stride, ++ unsigned char* dst, ++ int dst_stride, ++ int src_weight); ++#define vp8_filter_by_weight8x8 vp8_filter_by_weight8x8_c ++ ++void vp8_loop_filter_bh_c(unsigned char* y_ptr, ++ unsigned char* u_ptr, ++ unsigned char* v_ptr, ++ int y_stride, ++ int uv_stride, ++ struct loop_filter_info* lfi); ++#define vp8_loop_filter_bh vp8_loop_filter_bh_c ++ ++void vp8_loop_filter_bv_c(unsigned char* y_ptr, ++ unsigned char* u_ptr, ++ unsigned char* v_ptr, ++ int y_stride, ++ int uv_stride, ++ struct loop_filter_info* lfi); ++#define vp8_loop_filter_bv vp8_loop_filter_bv_c ++ ++void vp8_loop_filter_mbh_c(unsigned char* y_ptr, ++ unsigned char* u_ptr, ++ unsigned char* v_ptr, ++ int y_stride, ++ int uv_stride, ++ struct loop_filter_info* lfi); ++#define vp8_loop_filter_mbh vp8_loop_filter_mbh_c ++ ++void vp8_loop_filter_mbv_c(unsigned char* y_ptr, ++ unsigned char* u_ptr, ++ unsigned char* v_ptr, ++ int y_stride, ++ int uv_stride, ++ struct loop_filter_info* lfi); ++#define vp8_loop_filter_mbv vp8_loop_filter_mbv_c ++ ++void vp8_loop_filter_bhs_c(unsigned char* y_ptr, ++ int y_stride, ++ const unsigned char* blimit); ++#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_c ++ ++void 
vp8_loop_filter_bvs_c(unsigned char* y_ptr, ++ int y_stride, ++ const unsigned char* blimit); ++#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_c ++ ++void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr, ++ int y_stride, ++ const unsigned char* blimit); ++#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_c ++ ++void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr, ++ int y_stride, ++ const unsigned char* blimit); ++#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_c ++ ++int vp8_mbblock_error_c(struct macroblock* mb, int dc); ++#define vp8_mbblock_error vp8_mbblock_error_c ++ ++int vp8_mbuverror_c(struct macroblock* mb); ++#define vp8_mbuverror vp8_mbuverror_c ++ ++int vp8_refining_search_sad_c(struct macroblock* x, ++ struct block* b, ++ struct blockd* d, ++ union int_mv* ref_mv, ++ int error_per_bit, ++ int search_range, ++ struct variance_vtable* fn_ptr, ++ int* mvcost[2], ++ union int_mv* center_mv); ++#define vp8_refining_search_sad vp8_refining_search_sad_c ++ ++void vp8_regular_quantize_b_c(struct block*, struct blockd*); ++#define vp8_regular_quantize_b vp8_regular_quantize_b_c ++ ++void vp8_short_fdct4x4_c(short* input, short* output, int pitch); ++#define vp8_short_fdct4x4 vp8_short_fdct4x4_c ++ ++void vp8_short_fdct8x4_c(short* input, short* output, int pitch); ++#define vp8_short_fdct8x4 vp8_short_fdct8x4_c ++ ++void vp8_short_idct4x4llm_c(short* input, ++ unsigned char* pred_ptr, ++ int pred_stride, ++ unsigned char* dst_ptr, ++ int dst_stride); ++#define vp8_short_idct4x4llm vp8_short_idct4x4llm_c ++ ++void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff); ++#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_c ++ ++void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff); ++#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c ++ ++void vp8_short_walsh4x4_c(short* input, short* output, int pitch); ++#define vp8_short_walsh4x4 vp8_short_walsh4x4_c 
++ ++void vp8_sixtap_predict16x16_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_c ++ ++void vp8_sixtap_predict4x4_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_c ++ ++void vp8_sixtap_predict8x4_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_c ++ ++void vp8_sixtap_predict8x8_c(unsigned char* src_ptr, ++ int src_pixels_per_line, ++ int xoffset, ++ int yoffset, ++ unsigned char* dst_ptr, ++ int dst_pitch); ++#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_c ++ ++void vp8_rtcd(void); ++ ++#include "vpx_config.h" ++ ++#ifdef RTCD_C ++static void setup_rtcd_internal(void) {} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +Index: chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vp9_rtcd.h +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vp9_rtcd.h +@@ -0,0 +1,267 @@ ++// This file is generated. Do not edit. 
++#ifndef VP9_RTCD_H_ ++#define VP9_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++/* ++ * VP9 ++ */ ++ ++#include "vp9/common/vp9_common.h" ++#include "vp9/common/vp9_enums.h" ++#include "vp9/common/vp9_filter.h" ++#include "vpx/vpx_integer.h" ++ ++struct macroblockd; ++ ++/* Encoder forward decls */ ++struct macroblock; ++struct macroblock_plane; ++struct vp9_sad_table; ++struct ScanOrder; ++struct search_site_config; ++struct mv; ++union int_mv; ++struct yv12_buffer_config; ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++int64_t vp9_block_error_c(const tran_low_t* coeff, ++ const tran_low_t* dqcoeff, ++ intptr_t block_size, ++ int64_t* ssz); ++#define vp9_block_error vp9_block_error_c ++ ++int64_t vp9_block_error_fp_c(const tran_low_t* coeff, ++ const tran_low_t* dqcoeff, ++ int block_size); ++#define vp9_block_error_fp vp9_block_error_fp_c ++ ++int vp9_denoiser_filter_c(const uint8_t* sig, ++ int sig_stride, ++ const uint8_t* mc_avg, ++ int mc_avg_stride, ++ uint8_t* avg, ++ int avg_stride, ++ int increase_denoising, ++ BLOCK_SIZE bs, ++ int motion_magnitude); ++#define vp9_denoiser_filter vp9_denoiser_filter_c ++ ++int vp9_diamond_search_sad_c(const struct macroblock* x, ++ const struct search_site_config* cfg, ++ struct mv* ref_mv, ++ uint32_t start_mv_sad, ++ struct mv* best_mv, ++ int search_param, ++ int sad_per_bit, ++ int* num00, ++ const struct vp9_sad_table* sad_fn_ptr, ++ const struct mv* center_mv); ++#define vp9_diamond_search_sad vp9_diamond_search_sad_c ++ ++void vp9_fht16x16_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_fht16x16 vp9_fht16x16_c ++ ++void vp9_fht4x4_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_fht4x4 vp9_fht4x4_c ++ ++void vp9_fht8x8_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_fht8x8 vp9_fht8x8_c ++ ++void 
vp9_filter_by_weight16x16_c(const uint8_t* src, ++ int src_stride, ++ uint8_t* dst, ++ int dst_stride, ++ int src_weight); ++#define vp9_filter_by_weight16x16 vp9_filter_by_weight16x16_c ++ ++void vp9_filter_by_weight8x8_c(const uint8_t* src, ++ int src_stride, ++ uint8_t* dst, ++ int dst_stride, ++ int src_weight); ++#define vp9_filter_by_weight8x8 vp9_filter_by_weight8x8_c ++ ++void vp9_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride); ++#define vp9_fwht4x4 vp9_fwht4x4_c ++ ++int64_t vp9_highbd_block_error_c(const tran_low_t* coeff, ++ const tran_low_t* dqcoeff, ++ intptr_t block_size, ++ int64_t* ssz, ++ int bd); ++#define vp9_highbd_block_error vp9_highbd_block_error_c ++ ++void vp9_highbd_fht16x16_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_highbd_fht16x16 vp9_highbd_fht16x16_c ++ ++void vp9_highbd_fht4x4_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_highbd_fht4x4 vp9_highbd_fht4x4_c ++ ++void vp9_highbd_fht8x8_c(const int16_t* input, ++ tran_low_t* output, ++ int stride, ++ int tx_type); ++#define vp9_highbd_fht8x8 vp9_highbd_fht8x8_c ++ ++void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride); ++#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c ++ ++void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int tx_type, ++ int bd); ++#define vp9_highbd_iht16x16_256_add vp9_highbd_iht16x16_256_add_c ++ ++void vp9_highbd_iht4x4_16_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int tx_type, ++ int bd); ++#define vp9_highbd_iht4x4_16_add vp9_highbd_iht4x4_16_add_c ++ ++void vp9_highbd_iht8x8_64_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int tx_type, ++ int bd); ++#define vp9_highbd_iht8x8_64_add vp9_highbd_iht8x8_64_add_c ++ ++void vp9_highbd_mbpost_proc_across_ip_c(uint16_t* src, ++ int pitch, ++ int rows, ++ int cols, ++ int flimit); ++#define 
vp9_highbd_mbpost_proc_across_ip vp9_highbd_mbpost_proc_across_ip_c ++ ++void vp9_highbd_mbpost_proc_down_c(uint16_t* dst, ++ int pitch, ++ int rows, ++ int cols, ++ int flimit); ++#define vp9_highbd_mbpost_proc_down vp9_highbd_mbpost_proc_down_c ++ ++void vp9_highbd_post_proc_down_and_across_c(const uint16_t* src_ptr, ++ uint16_t* dst_ptr, ++ int src_pixels_per_line, ++ int dst_pixels_per_line, ++ int rows, ++ int cols, ++ int flimit); ++#define vp9_highbd_post_proc_down_and_across \ ++ vp9_highbd_post_proc_down_and_across_c ++ ++void vp9_highbd_quantize_fp_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ const struct macroblock_plane* const mb_plane, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const struct ScanOrder* const scan_order); ++#define vp9_highbd_quantize_fp vp9_highbd_quantize_fp_c ++ ++void vp9_highbd_quantize_fp_32x32_c( ++ const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ const struct macroblock_plane* const mb_plane, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const struct ScanOrder* const scan_order); ++#define vp9_highbd_quantize_fp_32x32 vp9_highbd_quantize_fp_32x32_c ++ ++void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1, ++ unsigned int stride, ++ const uint8_t* frame2, ++ unsigned int block_width, ++ unsigned int block_height, ++ int strength, ++ int* blk_fw, ++ int use_32x32, ++ uint32_t* accumulator, ++ uint16_t* count); ++#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c ++ ++void vp9_iht16x16_256_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride, ++ int tx_type); ++#define vp9_iht16x16_256_add vp9_iht16x16_256_add_c ++ ++void vp9_iht4x4_16_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride, ++ int tx_type); ++#define vp9_iht4x4_16_add vp9_iht4x4_16_add_c ++ ++void vp9_iht8x8_64_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int 
stride, ++ int tx_type); ++#define vp9_iht8x8_64_add vp9_iht8x8_64_add_c ++ ++void vp9_quantize_fp_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ const struct macroblock_plane* const mb_plane, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const struct ScanOrder* const scan_order); ++#define vp9_quantize_fp vp9_quantize_fp_c ++ ++void vp9_quantize_fp_32x32_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ const struct macroblock_plane* const mb_plane, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const struct ScanOrder* const scan_order); ++#define vp9_quantize_fp_32x32 vp9_quantize_fp_32x32_c ++ ++void vp9_scale_and_extend_frame_c(const struct yv12_buffer_config* src, ++ struct yv12_buffer_config* dst, ++ INTERP_FILTER filter_type, ++ int phase_scaler); ++#define vp9_scale_and_extend_frame vp9_scale_and_extend_frame_c ++ ++void vp9_rtcd(void); ++ ++#include "vpx_config.h" ++ ++#ifdef RTCD_C ++static void setup_rtcd_internal(void) {} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +Index: chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vpx_config.asm +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vpx_config.asm +@@ -0,0 +1,107 @@ ++@ This file was created from a .asm file ++@ using the ads2gas.pl script. 
++.syntax unified ++.equ VPX_ARCH_ARM , 0 ++.equ ARCH_ARM , 0 ++.equ VPX_ARCH_AARCH64 , 0 ++.equ ARCH_AARCH64 , 0 ++.equ VPX_ARCH_MIPS , 0 ++.equ ARCH_MIPS , 0 ++.equ VPX_ARCH_X86 , 0 ++.equ ARCH_X86 , 0 ++.equ VPX_ARCH_X86_64 , 0 ++.equ ARCH_X86_64 , 0 ++.equ VPX_ARCH_PPC , 0 ++.equ ARCH_PPC , 0 ++.equ VPX_ARCH_LOONGARCH , 0 ++.equ ARCH_LOONGARCH , 0 ++.equ HAVE_NEON_ASM , 0 ++.equ HAVE_NEON , 0 ++.equ HAVE_NEON_DOTPROD , 0 ++.equ HAVE_NEON_I8MM , 0 ++.equ HAVE_SVE , 0 ++.equ HAVE_MIPS32 , 0 ++.equ HAVE_DSPR2 , 0 ++.equ HAVE_MSA , 0 ++.equ HAVE_MIPS64 , 0 ++.equ HAVE_MMX , 0 ++.equ HAVE_SSE , 0 ++.equ HAVE_SSE2 , 0 ++.equ HAVE_SSE3 , 0 ++.equ HAVE_SSSE3 , 0 ++.equ HAVE_SSE4_1 , 0 ++.equ HAVE_AVX , 0 ++.equ HAVE_AVX2 , 0 ++.equ HAVE_AVX512 , 0 ++.equ HAVE_VSX , 0 ++.equ HAVE_MMI , 0 ++.equ HAVE_LSX , 0 ++.equ HAVE_LASX , 0 ++.equ HAVE_VPX_PORTS , 1 ++.equ HAVE_PTHREAD_H , 1 ++.equ HAVE_UNISTD_H , 0 ++.equ CONFIG_DEPENDENCY_TRACKING , 1 ++.equ CONFIG_EXTERNAL_BUILD , 1 ++.equ CONFIG_INSTALL_DOCS , 0 ++.equ CONFIG_INSTALL_BINS , 1 ++.equ CONFIG_INSTALL_LIBS , 1 ++.equ CONFIG_INSTALL_SRCS , 0 ++.equ CONFIG_DEBUG , 0 ++.equ CONFIG_GPROF , 0 ++.equ CONFIG_GCOV , 0 ++.equ CONFIG_RVCT , 0 ++.equ CONFIG_GCC , 1 ++.equ CONFIG_MSVS , 0 ++.equ CONFIG_PIC , 0 ++.equ CONFIG_BIG_ENDIAN , 0 ++.equ CONFIG_CODEC_SRCS , 0 ++.equ CONFIG_DEBUG_LIBS , 0 ++.equ CONFIG_DEQUANT_TOKENS , 0 ++.equ CONFIG_DC_RECON , 0 ++.equ CONFIG_RUNTIME_CPU_DETECT , 0 ++.equ CONFIG_POSTPROC , 1 ++.equ CONFIG_VP9_POSTPROC , 1 ++.equ CONFIG_MULTITHREAD , 1 ++.equ CONFIG_INTERNAL_STATS , 0 ++.equ CONFIG_VP8_ENCODER , 1 ++.equ CONFIG_VP8_DECODER , 1 ++.equ CONFIG_VP9_ENCODER , 1 ++.equ CONFIG_VP9_DECODER , 1 ++.equ CONFIG_VP8 , 1 ++.equ CONFIG_VP9 , 1 ++.equ CONFIG_ENCODERS , 1 ++.equ CONFIG_DECODERS , 1 ++.equ CONFIG_STATIC_MSVCRT , 0 ++.equ CONFIG_SPATIAL_RESAMPLING , 1 ++.equ CONFIG_REALTIME_ONLY , 1 ++.equ CONFIG_ONTHEFLY_BITPACKING , 0 ++.equ CONFIG_ERROR_CONCEALMENT , 0 ++.equ CONFIG_SHARED , 0 ++.equ 
CONFIG_STATIC , 1 ++.equ CONFIG_SMALL , 0 ++.equ CONFIG_POSTPROC_VISUALIZER , 0 ++.equ CONFIG_OS_SUPPORT , 1 ++.equ CONFIG_UNIT_TESTS , 1 ++.equ CONFIG_WEBM_IO , 1 ++.equ CONFIG_LIBYUV , 0 ++.equ CONFIG_DECODE_PERF_TESTS , 0 ++.equ CONFIG_ENCODE_PERF_TESTS , 0 ++.equ CONFIG_MULTI_RES_ENCODING , 1 ++.equ CONFIG_TEMPORAL_DENOISING , 1 ++.equ CONFIG_VP9_TEMPORAL_DENOISING , 1 ++.equ CONFIG_COEFFICIENT_RANGE_CHECKING , 0 ++.equ CONFIG_VP9_HIGHBITDEPTH , 1 ++.equ CONFIG_BETTER_HW_COMPATIBILITY , 0 ++.equ CONFIG_EXPERIMENTAL , 0 ++.equ CONFIG_SIZE_LIMIT , 1 ++.equ CONFIG_ALWAYS_ADJUST_BPM , 0 ++.equ CONFIG_BITSTREAM_DEBUG , 0 ++.equ CONFIG_MISMATCH_DEBUG , 0 ++.equ CONFIG_FP_MB_STATS , 0 ++.equ CONFIG_EMULATE_HARDWARE , 0 ++.equ CONFIG_NON_GREEDY_MV , 0 ++.equ CONFIG_RATE_CTRL , 0 ++.equ CONFIG_COLLECT_COMPONENT_TIMING , 0 ++.equ DECODE_WIDTH_LIMIT , 16384 ++.equ DECODE_HEIGHT_LIMIT , 16384 ++ .section .note.GNU-stack,"",%progbits +Index: chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vpx_config.c +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vpx_config.c +@@ -0,0 +1,10 @@ ++/* Copyright (c) 2011 The WebM project authors. All Rights Reserved. */ ++/* */ ++/* Use of this source code is governed by a BSD-style license */ ++/* that can be found in the LICENSE file in the root of the source */ ++/* tree. An additional intellectual property rights grant can be found */ ++/* in the file PATENTS. All contributing project authors may */ ++/* be found in the AUTHORS file in the root of the source tree. 
*/ ++#include "vpx/vpx_codec.h" ++static const char* const cfg = "--target=generic-gnu --enable-vp9-highbitdepth --enable-external-build --enable-postproc --enable-multi-res-encoding --enable-temporal-denoising --enable-vp9-temporal-denoising --enable-vp9-postproc --size-limit=16384x16384 --enable-realtime-only --disable-install-docs --disable-libyuv"; ++const char *vpx_codec_build_config(void) {return cfg;} +Index: chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vpx_config.h +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vpx_config.h +@@ -0,0 +1,116 @@ ++/* Copyright (c) 2011 The WebM project authors. All Rights Reserved. */ ++/* */ ++/* Use of this source code is governed by a BSD-style license */ ++/* that can be found in the LICENSE file in the root of the source */ ++/* tree. An additional intellectual property rights grant can be found */ ++/* in the file PATENTS. All contributing project authors may */ ++/* be found in the AUTHORS file in the root of the source tree. */ ++/* This file automatically generated by configure. Do not edit! 
*/ ++#ifndef VPX_CONFIG_H ++#define VPX_CONFIG_H ++#define RESTRICT ++#define INLINE inline ++#define VPX_ARCH_ARM 0 ++#define ARCH_ARM 0 ++#define VPX_ARCH_AARCH64 0 ++#define ARCH_AARCH64 0 ++#define VPX_ARCH_MIPS 0 ++#define ARCH_MIPS 0 ++#define VPX_ARCH_X86 0 ++#define ARCH_X86 0 ++#define VPX_ARCH_X86_64 0 ++#define ARCH_X86_64 0 ++#define VPX_ARCH_PPC 0 ++#define ARCH_PPC 0 ++#define VPX_ARCH_LOONGARCH 0 ++#define ARCH_LOONGARCH 0 ++#define HAVE_NEON_ASM 0 ++#define HAVE_NEON 0 ++#define HAVE_NEON_DOTPROD 0 ++#define HAVE_NEON_I8MM 0 ++#define HAVE_SVE 0 ++#define HAVE_MIPS32 0 ++#define HAVE_DSPR2 0 ++#define HAVE_MSA 0 ++#define HAVE_MIPS64 0 ++#define HAVE_MMX 0 ++#define HAVE_SSE 0 ++#define HAVE_SSE2 0 ++#define HAVE_SSE3 0 ++#define HAVE_SSSE3 0 ++#define HAVE_SSE4_1 0 ++#define HAVE_AVX 0 ++#define HAVE_AVX2 0 ++#define HAVE_AVX512 0 ++#define HAVE_VSX 0 ++#define HAVE_MMI 0 ++#define HAVE_LSX 0 ++#define HAVE_LASX 0 ++#define HAVE_VPX_PORTS 1 ++#define HAVE_PTHREAD_H 1 ++#define HAVE_UNISTD_H 0 ++#define CONFIG_DEPENDENCY_TRACKING 1 ++#define CONFIG_EXTERNAL_BUILD 1 ++#define CONFIG_INSTALL_DOCS 0 ++#define CONFIG_INSTALL_BINS 1 ++#define CONFIG_INSTALL_LIBS 1 ++#define CONFIG_INSTALL_SRCS 0 ++#define CONFIG_DEBUG 0 ++#define CONFIG_GPROF 0 ++#define CONFIG_GCOV 0 ++#define CONFIG_RVCT 0 ++#define CONFIG_GCC 1 ++#define CONFIG_MSVS 0 ++#define CONFIG_PIC 0 ++#define CONFIG_BIG_ENDIAN 0 ++#define CONFIG_CODEC_SRCS 0 ++#define CONFIG_DEBUG_LIBS 0 ++#define CONFIG_DEQUANT_TOKENS 0 ++#define CONFIG_DC_RECON 0 ++#define CONFIG_RUNTIME_CPU_DETECT 0 ++#define CONFIG_POSTPROC 1 ++#define CONFIG_VP9_POSTPROC 1 ++#define CONFIG_MULTITHREAD 1 ++#define CONFIG_INTERNAL_STATS 0 ++#define CONFIG_VP8_ENCODER 1 ++#define CONFIG_VP8_DECODER 1 ++#define CONFIG_VP9_ENCODER 1 ++#define CONFIG_VP9_DECODER 1 ++#define CONFIG_VP8 1 ++#define CONFIG_VP9 1 ++#define CONFIG_ENCODERS 1 ++#define CONFIG_DECODERS 1 ++#define CONFIG_STATIC_MSVCRT 0 ++#define 
CONFIG_SPATIAL_RESAMPLING 1 ++#define CONFIG_REALTIME_ONLY 1 ++#define CONFIG_ONTHEFLY_BITPACKING 0 ++#define CONFIG_ERROR_CONCEALMENT 0 ++#define CONFIG_SHARED 0 ++#define CONFIG_STATIC 1 ++#define CONFIG_SMALL 0 ++#define CONFIG_POSTPROC_VISUALIZER 0 ++#define CONFIG_OS_SUPPORT 1 ++#define CONFIG_UNIT_TESTS 1 ++#define CONFIG_WEBM_IO 1 ++#define CONFIG_LIBYUV 0 ++#define CONFIG_DECODE_PERF_TESTS 0 ++#define CONFIG_ENCODE_PERF_TESTS 0 ++#define CONFIG_MULTI_RES_ENCODING 1 ++#define CONFIG_TEMPORAL_DENOISING 1 ++#define CONFIG_VP9_TEMPORAL_DENOISING 1 ++#define CONFIG_COEFFICIENT_RANGE_CHECKING 0 ++#define CONFIG_VP9_HIGHBITDEPTH 1 ++#define CONFIG_BETTER_HW_COMPATIBILITY 0 ++#define CONFIG_EXPERIMENTAL 0 ++#define CONFIG_SIZE_LIMIT 1 ++#define CONFIG_ALWAYS_ADJUST_BPM 0 ++#define CONFIG_BITSTREAM_DEBUG 0 ++#define CONFIG_MISMATCH_DEBUG 0 ++#define CONFIG_FP_MB_STATS 0 ++#define CONFIG_EMULATE_HARDWARE 0 ++#define CONFIG_NON_GREEDY_MV 0 ++#define CONFIG_RATE_CTRL 0 ++#define CONFIG_COLLECT_COMPONENT_TIMING 0 ++#define DECODE_WIDTH_LIMIT 16384 ++#define DECODE_HEIGHT_LIMIT 16384 ++#endif /* VPX_CONFIG_H */ +Index: chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vpx_dsp_rtcd.h +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vpx_dsp_rtcd.h +@@ -0,0 +1,4128 @@ ++// This file is generated. Do not edit. 
++#ifndef VPX_DSP_RTCD_H_ ++#define VPX_DSP_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++/* ++ * DSP ++ */ ++ ++#include "vpx/vpx_integer.h" ++#include "vpx_dsp/vpx_dsp_common.h" ++#include "vpx_dsp/vpx_filter.h" ++#if CONFIG_VP9_ENCODER ++struct macroblock_plane; ++struct ScanOrder; ++#endif ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++unsigned int vpx_avg_4x4_c(const uint8_t*, int p); ++#define vpx_avg_4x4 vpx_avg_4x4_c ++ ++unsigned int vpx_avg_8x8_c(const uint8_t*, int p); ++#define vpx_avg_8x8 vpx_avg_8x8_c ++ ++void vpx_comp_avg_pred_c(uint8_t* comp_pred, ++ const uint8_t* pred, ++ int width, ++ int height, ++ const uint8_t* ref, ++ int ref_stride); ++#define vpx_comp_avg_pred vpx_comp_avg_pred_c ++ ++void vpx_convolve8_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8 vpx_convolve8_c ++ ++void vpx_convolve8_avg_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_avg vpx_convolve8_avg_c ++ ++void vpx_convolve8_avg_horiz_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_avg_horiz vpx_convolve8_avg_horiz_c ++ ++void vpx_convolve8_avg_vert_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_avg_vert vpx_convolve8_avg_vert_c ++ ++void vpx_convolve8_horiz_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ 
uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_horiz vpx_convolve8_horiz_c ++ ++void vpx_convolve8_vert_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve8_vert vpx_convolve8_vert_c ++ ++void vpx_convolve_avg_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve_avg vpx_convolve_avg_c ++ ++void vpx_convolve_copy_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_convolve_copy vpx_convolve_copy_c ++ ++void vpx_d117_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c ++ ++void vpx_d117_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c ++ ++void vpx_d117_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c ++ ++void vpx_d117_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c ++ ++void vpx_d135_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c ++ ++void 
vpx_d135_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c ++ ++void vpx_d135_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c ++ ++void vpx_d135_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c ++ ++void vpx_d153_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c ++ ++void vpx_d153_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c ++ ++void vpx_d153_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c ++ ++void vpx_d153_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c ++ ++void vpx_d207_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c ++ ++void vpx_d207_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c ++ ++void vpx_d207_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c ++ ++void vpx_d207_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c ++ ++void 
vpx_d45_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_c ++ ++void vpx_d45_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_c ++ ++void vpx_d45_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_c ++ ++void vpx_d45_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_c ++ ++void vpx_d45e_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c ++ ++void vpx_d63_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c ++ ++void vpx_d63_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c ++ ++void vpx_d63_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c ++ ++void vpx_d63_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c ++ ++void vpx_d63e_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c ++ ++void vpx_dc_128_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_c ++ ++void 
vpx_dc_128_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_c ++ ++void vpx_dc_128_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_c ++ ++void vpx_dc_128_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_c ++ ++void vpx_dc_left_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_c ++ ++void vpx_dc_left_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_c ++ ++void vpx_dc_left_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_c ++ ++void vpx_dc_left_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_c ++ ++void vpx_dc_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_c ++ ++void vpx_dc_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_c ++ ++void vpx_dc_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_c ++ ++void vpx_dc_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_predictor_8x8 
vpx_dc_predictor_8x8_c ++ ++void vpx_dc_top_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_c ++ ++void vpx_dc_top_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_c ++ ++void vpx_dc_top_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_c ++ ++void vpx_dc_top_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_c ++ ++void vpx_fdct16x16_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct16x16 vpx_fdct16x16_c ++ ++void vpx_fdct16x16_1_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct16x16_1 vpx_fdct16x16_1_c ++ ++void vpx_fdct32x32_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct32x32 vpx_fdct32x32_c ++ ++void vpx_fdct32x32_1_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct32x32_1 vpx_fdct32x32_1_c ++ ++void vpx_fdct32x32_rd_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct32x32_rd vpx_fdct32x32_rd_c ++ ++void vpx_fdct4x4_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct4x4 vpx_fdct4x4_c ++ ++void vpx_fdct4x4_1_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct4x4_1 vpx_fdct4x4_1_c ++ ++void vpx_fdct8x8_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct8x8 vpx_fdct8x8_c ++ ++void vpx_fdct8x8_1_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_fdct8x8_1 vpx_fdct8x8_1_c ++ ++void vpx_get16x16var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned 
int* sse, ++ int* sum); ++#define vpx_get16x16var vpx_get16x16var_c ++ ++unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr, ++ int src_stride, ++ const unsigned char* ref_ptr, ++ int ref_stride); ++#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c ++ ++void vpx_get8x8var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_get8x8var vpx_get8x8var_c ++ ++unsigned int vpx_get_mb_ss_c(const int16_t*); ++#define vpx_get_mb_ss vpx_get_mb_ss_c ++ ++void vpx_h_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_c ++ ++void vpx_h_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_c ++ ++void vpx_h_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_c ++ ++void vpx_h_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_c ++ ++void vpx_hadamard_16x16_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_hadamard_16x16 vpx_hadamard_16x16_c ++ ++void vpx_hadamard_32x32_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_hadamard_32x32 vpx_hadamard_32x32_c ++ ++void vpx_hadamard_8x8_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_hadamard_8x8 vpx_hadamard_8x8_c ++ ++void vpx_he_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c ++ ++void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, 
++ int* sum); ++#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c ++ ++void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c ++ ++unsigned int vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_c ++ ++unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c ++ ++unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c ++ ++unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance16x16 \ ++ vpx_highbd_10_sub_pixel_avg_variance16x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance16x32 \ ++ vpx_highbd_10_sub_pixel_avg_variance16x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ 
uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance16x8 \ ++ vpx_highbd_10_sub_pixel_avg_variance16x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance32x16 \ ++ vpx_highbd_10_sub_pixel_avg_variance32x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance32x32 \ ++ vpx_highbd_10_sub_pixel_avg_variance32x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance32x64 \ ++ vpx_highbd_10_sub_pixel_avg_variance32x64_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance4x4 \ ++ vpx_highbd_10_sub_pixel_avg_variance4x4_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance4x8 \ ++ vpx_highbd_10_sub_pixel_avg_variance4x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ 
uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance64x32 \ ++ vpx_highbd_10_sub_pixel_avg_variance64x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance64x64 \ ++ vpx_highbd_10_sub_pixel_avg_variance64x64_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance8x16 \ ++ vpx_highbd_10_sub_pixel_avg_variance8x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance8x4 \ ++ vpx_highbd_10_sub_pixel_avg_variance8x4_c ++ ++uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_10_sub_pixel_avg_variance8x8 \ ++ vpx_highbd_10_sub_pixel_avg_variance8x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance16x16 \ ++ vpx_highbd_10_sub_pixel_variance16x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance16x32 \ 
++ vpx_highbd_10_sub_pixel_variance16x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance16x8 \ ++ vpx_highbd_10_sub_pixel_variance16x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance32x16 \ ++ vpx_highbd_10_sub_pixel_variance32x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance32x32 \ ++ vpx_highbd_10_sub_pixel_variance32x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance32x64 \ ++ vpx_highbd_10_sub_pixel_variance32x64_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance4x4 \ ++ vpx_highbd_10_sub_pixel_variance4x4_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance4x8 \ ++ vpx_highbd_10_sub_pixel_variance4x8_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define 
vpx_highbd_10_sub_pixel_variance64x32 \ ++ vpx_highbd_10_sub_pixel_variance64x32_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance64x64 \ ++ vpx_highbd_10_sub_pixel_variance64x64_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance8x16 \ ++ vpx_highbd_10_sub_pixel_variance8x16_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance8x4 \ ++ vpx_highbd_10_sub_pixel_variance8x4_c ++ ++uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_10_sub_pixel_variance8x8 \ ++ vpx_highbd_10_sub_pixel_variance8x8_c ++ ++unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_c ++ ++unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_c ++ ++unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_c ++ ++unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ 
const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_c ++ ++unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_c ++ ++unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_c ++ ++unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c ++ ++unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c ++ ++unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_c ++ ++unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_c ++ ++unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_c ++ ++unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c ++ ++unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* 
src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_c ++ ++void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c ++ ++void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c ++ ++unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_c ++ ++unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c ++ ++unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c ++ ++unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance16x16 \ ++ vpx_highbd_12_sub_pixel_avg_variance16x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const 
uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance16x32 \ ++ vpx_highbd_12_sub_pixel_avg_variance16x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance16x8 \ ++ vpx_highbd_12_sub_pixel_avg_variance16x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance32x16 \ ++ vpx_highbd_12_sub_pixel_avg_variance32x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance32x32 \ ++ vpx_highbd_12_sub_pixel_avg_variance32x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance32x64 \ ++ vpx_highbd_12_sub_pixel_avg_variance32x64_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance4x4 \ ++ vpx_highbd_12_sub_pixel_avg_variance4x4_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const 
uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance4x8 \ ++ vpx_highbd_12_sub_pixel_avg_variance4x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance64x32 \ ++ vpx_highbd_12_sub_pixel_avg_variance64x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c( ++ const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance64x64 \ ++ vpx_highbd_12_sub_pixel_avg_variance64x64_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance8x16 \ ++ vpx_highbd_12_sub_pixel_avg_variance8x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance8x4 \ ++ vpx_highbd_12_sub_pixel_avg_variance8x4_c ++ ++uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_12_sub_pixel_avg_variance8x8 \ ++ vpx_highbd_12_sub_pixel_avg_variance8x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define 
vpx_highbd_12_sub_pixel_variance16x16 \ ++ vpx_highbd_12_sub_pixel_variance16x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance16x32 \ ++ vpx_highbd_12_sub_pixel_variance16x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance16x8 \ ++ vpx_highbd_12_sub_pixel_variance16x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance32x16 \ ++ vpx_highbd_12_sub_pixel_variance32x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance32x32 \ ++ vpx_highbd_12_sub_pixel_variance32x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance32x64 \ ++ vpx_highbd_12_sub_pixel_variance32x64_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance4x4 \ ++ vpx_highbd_12_sub_pixel_variance4x4_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* 
sse); ++#define vpx_highbd_12_sub_pixel_variance4x8 \ ++ vpx_highbd_12_sub_pixel_variance4x8_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance64x32 \ ++ vpx_highbd_12_sub_pixel_variance64x32_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance64x64 \ ++ vpx_highbd_12_sub_pixel_variance64x64_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance8x16 \ ++ vpx_highbd_12_sub_pixel_variance8x16_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance8x4 \ ++ vpx_highbd_12_sub_pixel_variance8x4_c ++ ++uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_12_sub_pixel_variance8x8 \ ++ vpx_highbd_12_sub_pixel_variance8x8_c ++ ++unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_c ++ ++unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_c ++ ++unsigned int 
vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_c ++ ++unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_c ++ ++unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_c ++ ++unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_c ++ ++unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c ++ ++unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c ++ ++unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_c ++ ++unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_c ++ ++unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance8x16 
vpx_highbd_12_variance8x16_c ++ ++unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c ++ ++unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_c ++ ++void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c ++ ++void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse, ++ int* sum); ++#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c ++ ++unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_c ++ ++unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c ++ ++unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c ++ ++unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define 
vpx_highbd_8_sub_pixel_avg_variance16x16 \ ++ vpx_highbd_8_sub_pixel_avg_variance16x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance16x32 \ ++ vpx_highbd_8_sub_pixel_avg_variance16x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance16x8 \ ++ vpx_highbd_8_sub_pixel_avg_variance16x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance32x16 \ ++ vpx_highbd_8_sub_pixel_avg_variance32x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance32x32 \ ++ vpx_highbd_8_sub_pixel_avg_variance32x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance32x64 \ ++ vpx_highbd_8_sub_pixel_avg_variance32x64_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define 
vpx_highbd_8_sub_pixel_avg_variance4x4 \ ++ vpx_highbd_8_sub_pixel_avg_variance4x4_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance4x8 \ ++ vpx_highbd_8_sub_pixel_avg_variance4x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance64x32 \ ++ vpx_highbd_8_sub_pixel_avg_variance64x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance64x64 \ ++ vpx_highbd_8_sub_pixel_avg_variance64x64_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance8x16 \ ++ vpx_highbd_8_sub_pixel_avg_variance8x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance8x4 \ ++ vpx_highbd_8_sub_pixel_avg_variance8x4_c ++ ++uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_highbd_8_sub_pixel_avg_variance8x8 \ ++ 
vpx_highbd_8_sub_pixel_avg_variance8x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance16x16 \ ++ vpx_highbd_8_sub_pixel_variance16x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance16x32 \ ++ vpx_highbd_8_sub_pixel_variance16x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance16x8 \ ++ vpx_highbd_8_sub_pixel_variance16x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance32x16 \ ++ vpx_highbd_8_sub_pixel_variance32x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance32x32 \ ++ vpx_highbd_8_sub_pixel_variance32x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance32x64 \ ++ vpx_highbd_8_sub_pixel_variance32x64_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance4x4 
vpx_highbd_8_sub_pixel_variance4x4_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance64x32 \ ++ vpx_highbd_8_sub_pixel_variance64x32_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance64x64 \ ++ vpx_highbd_8_sub_pixel_variance64x64_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance8x16 \ ++ vpx_highbd_8_sub_pixel_variance8x16_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance8x4 vpx_highbd_8_sub_pixel_variance8x4_c ++ ++uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_highbd_8_sub_pixel_variance8x8 vpx_highbd_8_sub_pixel_variance8x8_c ++ ++unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_c ++ ++unsigned int vpx_highbd_8_variance16x32_c(const 
uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_c ++ ++unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_c ++ ++unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_c ++ ++unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_c ++ ++unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_c ++ ++unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c ++ ++unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c ++ ++unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_c ++ ++unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance64x64 vpx_highbd_8_variance64x64_c ++ ++unsigned int 
vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_c ++ ++unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c ++ ++unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_c ++ ++unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p); ++#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_c ++ ++unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p); ++#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_c ++ ++void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred, ++ const uint16_t* pred, ++ int width, ++ int height, ++ const uint16_t* ref, ++ int ref_stride); ++#define vpx_highbd_comp_avg_pred vpx_highbd_comp_avg_pred_c ++ ++void vpx_highbd_convolve8_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8 vpx_highbd_convolve8_c ++ ++void vpx_highbd_convolve8_avg_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_avg vpx_highbd_convolve8_avg_c ++ ++void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define 
vpx_highbd_convolve8_avg_horiz vpx_highbd_convolve8_avg_horiz_c ++ ++void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_avg_vert vpx_highbd_convolve8_avg_vert_c ++ ++void vpx_highbd_convolve8_horiz_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_horiz vpx_highbd_convolve8_horiz_c ++ ++void vpx_highbd_convolve8_vert_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve8_vert vpx_highbd_convolve8_vert_c ++ ++void vpx_highbd_convolve_avg_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve_avg vpx_highbd_convolve_avg_c ++ ++void vpx_highbd_convolve_copy_c(const uint16_t* src, ++ ptrdiff_t src_stride, ++ uint16_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h, ++ int bd); ++#define vpx_highbd_convolve_copy vpx_highbd_convolve_copy_c ++ ++void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d117_predictor_16x16 vpx_highbd_d117_predictor_16x16_c ++ ++void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const 
uint16_t* left, ++ int bd); ++#define vpx_highbd_d117_predictor_32x32 vpx_highbd_d117_predictor_32x32_c ++ ++void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_c ++ ++void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d117_predictor_8x8 vpx_highbd_d117_predictor_8x8_c ++ ++void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d135_predictor_16x16 vpx_highbd_d135_predictor_16x16_c ++ ++void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d135_predictor_32x32 vpx_highbd_d135_predictor_32x32_c ++ ++void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_c ++ ++void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d135_predictor_8x8 vpx_highbd_d135_predictor_8x8_c ++ ++void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d153_predictor_16x16 vpx_highbd_d153_predictor_16x16_c ++ ++void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d153_predictor_32x32 vpx_highbd_d153_predictor_32x32_c ++ ++void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define 
vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_c ++ ++void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d153_predictor_8x8 vpx_highbd_d153_predictor_8x8_c ++ ++void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d207_predictor_16x16 vpx_highbd_d207_predictor_16x16_c ++ ++void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d207_predictor_32x32 vpx_highbd_d207_predictor_32x32_c ++ ++void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_c ++ ++void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d207_predictor_8x8 vpx_highbd_d207_predictor_8x8_c ++ ++void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d45_predictor_16x16 vpx_highbd_d45_predictor_16x16_c ++ ++void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d45_predictor_32x32 vpx_highbd_d45_predictor_32x32_c ++ ++void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d45_predictor_4x4 vpx_highbd_d45_predictor_4x4_c ++ ++void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d45_predictor_8x8 vpx_highbd_d45_predictor_8x8_c ++ 
++void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d63_predictor_16x16 vpx_highbd_d63_predictor_16x16_c ++ ++void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d63_predictor_32x32 vpx_highbd_d63_predictor_32x32_c ++ ++void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_c ++ ++void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_d63_predictor_8x8 vpx_highbd_d63_predictor_8x8_c ++ ++void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_c ++ ++void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_c ++ ++void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_c ++ ++void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_c ++ ++void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_left_predictor_16x16 vpx_highbd_dc_left_predictor_16x16_c ++ ++void 
vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_left_predictor_32x32 vpx_highbd_dc_left_predictor_32x32_c ++ ++void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_c ++ ++void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_c ++ ++void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_c ++ ++void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_c ++ ++void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_c ++ ++void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_c ++ ++void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_c ++ ++void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_c ++ ++void 
vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_c ++ ++void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_dc_top_predictor_8x8 vpx_highbd_dc_top_predictor_8x8_c ++ ++void vpx_highbd_fdct16x16_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct16x16 vpx_highbd_fdct16x16_c ++ ++void vpx_highbd_fdct16x16_1_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct16x16_1 vpx_highbd_fdct16x16_1_c ++ ++void vpx_highbd_fdct32x32_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct32x32 vpx_highbd_fdct32x32_c ++ ++void vpx_highbd_fdct32x32_1_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct32x32_1 vpx_highbd_fdct32x32_1_c ++ ++void vpx_highbd_fdct32x32_rd_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct32x32_rd vpx_highbd_fdct32x32_rd_c ++ ++void vpx_highbd_fdct4x4_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_highbd_fdct4x4 vpx_highbd_fdct4x4_c ++ ++void vpx_highbd_fdct8x8_c(const int16_t* input, tran_low_t* output, int stride); ++#define vpx_highbd_fdct8x8 vpx_highbd_fdct8x8_c ++ ++void vpx_highbd_fdct8x8_1_c(const int16_t* input, ++ tran_low_t* output, ++ int stride); ++#define vpx_highbd_fdct8x8_1 vpx_highbd_fdct8x8_1_c ++ ++void vpx_highbd_h_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_c ++ ++void vpx_highbd_h_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_h_predictor_32x32 
vpx_highbd_h_predictor_32x32_c ++ ++void vpx_highbd_h_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_c ++ ++void vpx_highbd_h_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_c ++ ++void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c ++ ++void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c ++ ++void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff, ++ ptrdiff_t src_stride, ++ tran_low_t* coeff); ++#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c ++ ++void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct16x16_10_add vpx_highbd_idct16x16_10_add_c ++ ++void vpx_highbd_idct16x16_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct16x16_1_add vpx_highbd_idct16x16_1_add_c ++ ++void vpx_highbd_idct16x16_256_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct16x16_256_add vpx_highbd_idct16x16_256_add_c ++ ++void vpx_highbd_idct16x16_38_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct16x16_38_add vpx_highbd_idct16x16_38_add_c ++ ++void vpx_highbd_idct32x32_1024_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct32x32_1024_add vpx_highbd_idct32x32_1024_add_c ++ ++void vpx_highbd_idct32x32_135_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define 
vpx_highbd_idct32x32_135_add vpx_highbd_idct32x32_135_add_c ++ ++void vpx_highbd_idct32x32_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct32x32_1_add vpx_highbd_idct32x32_1_add_c ++ ++void vpx_highbd_idct32x32_34_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct32x32_34_add vpx_highbd_idct32x32_34_add_c ++ ++void vpx_highbd_idct4x4_16_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct4x4_16_add vpx_highbd_idct4x4_16_add_c ++ ++void vpx_highbd_idct4x4_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct4x4_1_add vpx_highbd_idct4x4_1_add_c ++ ++void vpx_highbd_idct8x8_12_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct8x8_12_add vpx_highbd_idct8x8_12_add_c ++ ++void vpx_highbd_idct8x8_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct8x8_1_add vpx_highbd_idct8x8_1_add_c ++ ++void vpx_highbd_idct8x8_64_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_idct8x8_64_add vpx_highbd_idct8x8_64_add_c ++ ++void vpx_highbd_iwht4x4_16_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_iwht4x4_16_add vpx_highbd_iwht4x4_16_add_c ++ ++void vpx_highbd_iwht4x4_1_add_c(const tran_low_t* input, ++ uint16_t* dest, ++ int stride, ++ int bd); ++#define vpx_highbd_iwht4x4_1_add vpx_highbd_iwht4x4_1_add_c ++ ++void vpx_highbd_lpf_horizontal_16_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_horizontal_16 vpx_highbd_lpf_horizontal_16_c ++ ++void vpx_highbd_lpf_horizontal_16_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ 
int bd); ++#define vpx_highbd_lpf_horizontal_16_dual vpx_highbd_lpf_horizontal_16_dual_c ++ ++void vpx_highbd_lpf_horizontal_4_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_horizontal_4 vpx_highbd_lpf_horizontal_4_c ++ ++void vpx_highbd_lpf_horizontal_4_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1, ++ int bd); ++#define vpx_highbd_lpf_horizontal_4_dual vpx_highbd_lpf_horizontal_4_dual_c ++ ++void vpx_highbd_lpf_horizontal_8_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_horizontal_8 vpx_highbd_lpf_horizontal_8_c ++ ++void vpx_highbd_lpf_horizontal_8_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1, ++ int bd); ++#define vpx_highbd_lpf_horizontal_8_dual vpx_highbd_lpf_horizontal_8_dual_c ++ ++void vpx_highbd_lpf_vertical_16_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_vertical_16 vpx_highbd_lpf_vertical_16_c ++ ++void vpx_highbd_lpf_vertical_16_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_vertical_16_dual vpx_highbd_lpf_vertical_16_dual_c ++ ++void vpx_highbd_lpf_vertical_4_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_vertical_4 vpx_highbd_lpf_vertical_4_c ++ ++void vpx_highbd_lpf_vertical_4_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* 
thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1, ++ int bd); ++#define vpx_highbd_lpf_vertical_4_dual vpx_highbd_lpf_vertical_4_dual_c ++ ++void vpx_highbd_lpf_vertical_8_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh, ++ int bd); ++#define vpx_highbd_lpf_vertical_8 vpx_highbd_lpf_vertical_8_c ++ ++void vpx_highbd_lpf_vertical_8_dual_c(uint16_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1, ++ int bd); ++#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_c ++ ++void vpx_highbd_minmax_8x8_c(const uint8_t* s8, ++ int p, ++ const uint8_t* d8, ++ int dp, ++ int* min, ++ int* max); ++#define vpx_highbd_minmax_8x8 vpx_highbd_minmax_8x8_c ++ ++void vpx_highbd_quantize_b_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ const struct macroblock_plane* const mb_plane, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const struct ScanOrder* const scan_order); ++#define vpx_highbd_quantize_b vpx_highbd_quantize_b_c ++ ++void vpx_highbd_quantize_b_32x32_c( ++ const tran_low_t* coeff_ptr, ++ const struct macroblock_plane* const mb_plane, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const struct ScanOrder* const scan_order); ++#define vpx_highbd_quantize_b_32x32 vpx_highbd_quantize_b_32x32_c ++ ++unsigned int vpx_highbd_sad16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad16x16 vpx_highbd_sad16x16_c ++ ++unsigned int vpx_highbd_sad16x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad16x16_avg vpx_highbd_sad16x16_avg_c ++ ++void 
vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_c ++ ++unsigned int vpx_highbd_sad16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad16x32 vpx_highbd_sad16x32_c ++ ++unsigned int vpx_highbd_sad16x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad16x32_avg vpx_highbd_sad16x32_avg_c ++ ++void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_c ++ ++unsigned int vpx_highbd_sad16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad16x8 vpx_highbd_sad16x8_c ++ ++unsigned int vpx_highbd_sad16x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad16x8_avg vpx_highbd_sad16x8_avg_c ++ ++void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_c ++ ++unsigned int vpx_highbd_sad32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad32x16 vpx_highbd_sad32x16_c ++ ++unsigned int vpx_highbd_sad32x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad32x16_avg vpx_highbd_sad32x16_avg_c ++ ++void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ 
uint32_t sad_array[4]); ++#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_c ++ ++unsigned int vpx_highbd_sad32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad32x32 vpx_highbd_sad32x32_c ++ ++unsigned int vpx_highbd_sad32x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad32x32_avg vpx_highbd_sad32x32_avg_c ++ ++void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_c ++ ++unsigned int vpx_highbd_sad32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad32x64 vpx_highbd_sad32x64_c ++ ++unsigned int vpx_highbd_sad32x64_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad32x64_avg vpx_highbd_sad32x64_avg_c ++ ++void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_c ++ ++unsigned int vpx_highbd_sad4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad4x4 vpx_highbd_sad4x4_c ++ ++unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad4x4_avg vpx_highbd_sad4x4_avg_c ++ ++void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_c ++ ++unsigned int vpx_highbd_sad4x8_c(const uint8_t* 
src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad4x8 vpx_highbd_sad4x8_c ++ ++unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad4x8_avg vpx_highbd_sad4x8_avg_c ++ ++void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_c ++ ++unsigned int vpx_highbd_sad64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad64x32 vpx_highbd_sad64x32_c ++ ++unsigned int vpx_highbd_sad64x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad64x32_avg vpx_highbd_sad64x32_avg_c ++ ++void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_c ++ ++unsigned int vpx_highbd_sad64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad64x64 vpx_highbd_sad64x64_c ++ ++unsigned int vpx_highbd_sad64x64_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad64x64_avg vpx_highbd_sad64x64_avg_c ++ ++void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_c ++ ++unsigned int vpx_highbd_sad8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad8x16 vpx_highbd_sad8x16_c ++ 
++unsigned int vpx_highbd_sad8x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad8x16_avg vpx_highbd_sad8x16_avg_c ++ ++void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_c ++ ++unsigned int vpx_highbd_sad8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad8x4 vpx_highbd_sad8x4_c ++ ++unsigned int vpx_highbd_sad8x4_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad8x4_avg vpx_highbd_sad8x4_avg_c ++ ++void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_c ++ ++unsigned int vpx_highbd_sad8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad8x8 vpx_highbd_sad8x8_c ++ ++unsigned int vpx_highbd_sad8x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_highbd_sad8x8_avg vpx_highbd_sad8x8_avg_c ++ ++void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_c ++ ++unsigned int vpx_highbd_sad_skip_16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad_skip_16x16 vpx_highbd_sad_skip_16x16_c ++ ++void vpx_highbd_sad_skip_16x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ 
uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_16x16x4d vpx_highbd_sad_skip_16x16x4d_c ++ ++unsigned int vpx_highbd_sad_skip_16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad_skip_16x32 vpx_highbd_sad_skip_16x32_c ++ ++void vpx_highbd_sad_skip_16x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_16x32x4d vpx_highbd_sad_skip_16x32x4d_c ++ ++unsigned int vpx_highbd_sad_skip_16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad_skip_16x8 vpx_highbd_sad_skip_16x8_c ++ ++void vpx_highbd_sad_skip_16x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_16x8x4d vpx_highbd_sad_skip_16x8x4d_c ++ ++unsigned int vpx_highbd_sad_skip_32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad_skip_32x16 vpx_highbd_sad_skip_32x16_c ++ ++void vpx_highbd_sad_skip_32x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_32x16x4d vpx_highbd_sad_skip_32x16x4d_c ++ ++unsigned int vpx_highbd_sad_skip_32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad_skip_32x32 vpx_highbd_sad_skip_32x32_c ++ ++void vpx_highbd_sad_skip_32x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_32x32x4d vpx_highbd_sad_skip_32x32x4d_c ++ ++unsigned int vpx_highbd_sad_skip_32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define 
vpx_highbd_sad_skip_32x64 vpx_highbd_sad_skip_32x64_c ++ ++void vpx_highbd_sad_skip_32x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_32x64x4d vpx_highbd_sad_skip_32x64x4d_c ++ ++unsigned int vpx_highbd_sad_skip_4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad_skip_4x4 vpx_highbd_sad_skip_4x4_c ++ ++void vpx_highbd_sad_skip_4x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_4x4x4d vpx_highbd_sad_skip_4x4x4d_c ++ ++unsigned int vpx_highbd_sad_skip_4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad_skip_4x8 vpx_highbd_sad_skip_4x8_c ++ ++void vpx_highbd_sad_skip_4x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_4x8x4d vpx_highbd_sad_skip_4x8x4d_c ++ ++unsigned int vpx_highbd_sad_skip_64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad_skip_64x32 vpx_highbd_sad_skip_64x32_c ++ ++void vpx_highbd_sad_skip_64x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_64x32x4d vpx_highbd_sad_skip_64x32x4d_c ++ ++unsigned int vpx_highbd_sad_skip_64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad_skip_64x64 vpx_highbd_sad_skip_64x64_c ++ ++void vpx_highbd_sad_skip_64x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_64x64x4d 
vpx_highbd_sad_skip_64x64x4d_c ++ ++unsigned int vpx_highbd_sad_skip_8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad_skip_8x16 vpx_highbd_sad_skip_8x16_c ++ ++void vpx_highbd_sad_skip_8x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_8x16x4d vpx_highbd_sad_skip_8x16x4d_c ++ ++unsigned int vpx_highbd_sad_skip_8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad_skip_8x4 vpx_highbd_sad_skip_8x4_c ++ ++void vpx_highbd_sad_skip_8x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_8x4x4d vpx_highbd_sad_skip_8x4x4d_c ++ ++unsigned int vpx_highbd_sad_skip_8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_highbd_sad_skip_8x8 vpx_highbd_sad_skip_8x8_c ++ ++void vpx_highbd_sad_skip_8x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_highbd_sad_skip_8x8x4d vpx_highbd_sad_skip_8x8x4d_c ++ ++int vpx_highbd_satd_c(const tran_low_t* coeff, int length); ++#define vpx_highbd_satd vpx_highbd_satd_c ++ ++int64_t vpx_highbd_sse_c(const uint8_t* a8, ++ int a_stride, ++ const uint8_t* b8, ++ int b_stride, ++ int width, ++ int height); ++#define vpx_highbd_sse vpx_highbd_sse_c ++ ++void vpx_highbd_subtract_block_c(int rows, ++ int cols, ++ int16_t* diff_ptr, ++ ptrdiff_t diff_stride, ++ const uint8_t* src8_ptr, ++ ptrdiff_t src_stride, ++ const uint8_t* pred8_ptr, ++ ptrdiff_t pred_stride, ++ int bd); ++#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c ++ ++void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, 
++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_c ++ ++void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_c ++ ++void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_c ++ ++void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_c ++ ++void vpx_highbd_v_predictor_16x16_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_c ++ ++void vpx_highbd_v_predictor_32x32_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_c ++ ++void vpx_highbd_v_predictor_4x4_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_c ++ ++void vpx_highbd_v_predictor_8x8_c(uint16_t* dst, ++ ptrdiff_t stride, ++ const uint16_t* above, ++ const uint16_t* left, ++ int bd); ++#define vpx_highbd_v_predictor_8x8 vpx_highbd_v_predictor_8x8_c ++ ++void vpx_idct16x16_10_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct16x16_10_add vpx_idct16x16_10_add_c ++ ++void vpx_idct16x16_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct16x16_1_add vpx_idct16x16_1_add_c ++ ++void vpx_idct16x16_256_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride); ++#define vpx_idct16x16_256_add vpx_idct16x16_256_add_c ++ ++void 
vpx_idct16x16_38_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct16x16_38_add vpx_idct16x16_38_add_c ++ ++void vpx_idct32x32_1024_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride); ++#define vpx_idct32x32_1024_add vpx_idct32x32_1024_add_c ++ ++void vpx_idct32x32_135_add_c(const tran_low_t* input, ++ uint8_t* dest, ++ int stride); ++#define vpx_idct32x32_135_add vpx_idct32x32_135_add_c ++ ++void vpx_idct32x32_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct32x32_1_add vpx_idct32x32_1_add_c ++ ++void vpx_idct32x32_34_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct32x32_34_add vpx_idct32x32_34_add_c ++ ++void vpx_idct4x4_16_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct4x4_16_add vpx_idct4x4_16_add_c ++ ++void vpx_idct4x4_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct4x4_1_add vpx_idct4x4_1_add_c ++ ++void vpx_idct8x8_12_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct8x8_12_add vpx_idct8x8_12_add_c ++ ++void vpx_idct8x8_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct8x8_1_add vpx_idct8x8_1_add_c ++ ++void vpx_idct8x8_64_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_idct8x8_64_add vpx_idct8x8_64_add_c ++ ++int16_t vpx_int_pro_col_c(const uint8_t* ref, const int width); ++#define vpx_int_pro_col vpx_int_pro_col_c ++ ++void vpx_int_pro_row_c(int16_t hbuf[16], ++ const uint8_t* ref, ++ const int ref_stride, ++ const int height); ++#define vpx_int_pro_row vpx_int_pro_row_c ++ ++void vpx_iwht4x4_16_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_iwht4x4_16_add vpx_iwht4x4_16_add_c ++ ++void vpx_iwht4x4_1_add_c(const tran_low_t* input, uint8_t* dest, int stride); ++#define vpx_iwht4x4_1_add vpx_iwht4x4_1_add_c ++ ++void vpx_lpf_horizontal_16_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ 
const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_horizontal_16 vpx_lpf_horizontal_16_c ++ ++void vpx_lpf_horizontal_16_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_horizontal_16_dual vpx_lpf_horizontal_16_dual_c ++ ++void vpx_lpf_horizontal_4_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_horizontal_4 vpx_lpf_horizontal_4_c ++ ++void vpx_lpf_horizontal_4_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1); ++#define vpx_lpf_horizontal_4_dual vpx_lpf_horizontal_4_dual_c ++ ++void vpx_lpf_horizontal_8_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_horizontal_8 vpx_lpf_horizontal_8_c ++ ++void vpx_lpf_horizontal_8_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1); ++#define vpx_lpf_horizontal_8_dual vpx_lpf_horizontal_8_dual_c ++ ++void vpx_lpf_vertical_16_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_vertical_16 vpx_lpf_vertical_16_c ++ ++void vpx_lpf_vertical_16_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_vertical_16_dual vpx_lpf_vertical_16_dual_c ++ ++void vpx_lpf_vertical_4_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_vertical_4 vpx_lpf_vertical_4_c ++ ++void vpx_lpf_vertical_4_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ 
const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1); ++#define vpx_lpf_vertical_4_dual vpx_lpf_vertical_4_dual_c ++ ++void vpx_lpf_vertical_8_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit, ++ const uint8_t* limit, ++ const uint8_t* thresh); ++#define vpx_lpf_vertical_8 vpx_lpf_vertical_8_c ++ ++void vpx_lpf_vertical_8_dual_c(uint8_t* s, ++ int pitch, ++ const uint8_t* blimit0, ++ const uint8_t* limit0, ++ const uint8_t* thresh0, ++ const uint8_t* blimit1, ++ const uint8_t* limit1, ++ const uint8_t* thresh1); ++#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_c ++ ++void vpx_mbpost_proc_across_ip_c(unsigned char* src, ++ int pitch, ++ int rows, ++ int cols, ++ int flimit); ++#define vpx_mbpost_proc_across_ip vpx_mbpost_proc_across_ip_c ++ ++void vpx_mbpost_proc_down_c(unsigned char* dst, ++ int pitch, ++ int rows, ++ int cols, ++ int flimit); ++#define vpx_mbpost_proc_down vpx_mbpost_proc_down_c ++ ++void vpx_minmax_8x8_c(const uint8_t* s, ++ int p, ++ const uint8_t* d, ++ int dp, ++ int* min, ++ int* max); ++#define vpx_minmax_8x8 vpx_minmax_8x8_c ++ ++unsigned int vpx_mse16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_mse16x16 vpx_mse16x16_c ++ ++unsigned int vpx_mse16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_mse16x8 vpx_mse16x8_c ++ ++unsigned int vpx_mse8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_mse8x16 vpx_mse8x16_c ++ ++unsigned int vpx_mse8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_mse8x8 vpx_mse8x8_c ++ ++void vpx_plane_add_noise_c(uint8_t* start, ++ const int8_t* noise, ++ int blackclamp, ++ int whiteclamp, ++ int width, ++ int height, ++ int pitch); ++#define 
vpx_plane_add_noise vpx_plane_add_noise_c ++ ++void vpx_post_proc_down_and_across_mb_row_c(unsigned char* src, ++ unsigned char* dst, ++ int src_pitch, ++ int dst_pitch, ++ int cols, ++ unsigned char* flimits, ++ int size); ++#define vpx_post_proc_down_and_across_mb_row \ ++ vpx_post_proc_down_and_across_mb_row_c ++ ++void vpx_quantize_b_c(const tran_low_t* coeff_ptr, ++ intptr_t n_coeffs, ++ const struct macroblock_plane* const mb_plane, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const struct ScanOrder* const scan_order); ++#define vpx_quantize_b vpx_quantize_b_c ++ ++void vpx_quantize_b_32x32_c(const tran_low_t* coeff_ptr, ++ const struct macroblock_plane* const mb_plane, ++ tran_low_t* qcoeff_ptr, ++ tran_low_t* dqcoeff_ptr, ++ const int16_t* dequant_ptr, ++ uint16_t* eob_ptr, ++ const struct ScanOrder* const scan_order); ++#define vpx_quantize_b_32x32 vpx_quantize_b_32x32_c ++ ++unsigned int vpx_sad16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad16x16 vpx_sad16x16_c ++ ++unsigned int vpx_sad16x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad16x16_avg vpx_sad16x16_avg_c ++ ++void vpx_sad16x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad16x16x4d vpx_sad16x16x4d_c ++ ++unsigned int vpx_sad16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad16x32 vpx_sad16x32_c ++ ++unsigned int vpx_sad16x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad16x32_avg vpx_sad16x32_avg_c ++ ++void vpx_sad16x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ 
int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad16x32x4d vpx_sad16x32x4d_c ++ ++unsigned int vpx_sad16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad16x8 vpx_sad16x8_c ++ ++unsigned int vpx_sad16x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad16x8_avg vpx_sad16x8_avg_c ++ ++void vpx_sad16x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad16x8x4d vpx_sad16x8x4d_c ++ ++unsigned int vpx_sad32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad32x16 vpx_sad32x16_c ++ ++unsigned int vpx_sad32x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad32x16_avg vpx_sad32x16_avg_c ++ ++void vpx_sad32x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad32x16x4d vpx_sad32x16x4d_c ++ ++unsigned int vpx_sad32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad32x32 vpx_sad32x32_c ++ ++unsigned int vpx_sad32x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad32x32_avg vpx_sad32x32_avg_c ++ ++void vpx_sad32x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad32x32x4d vpx_sad32x32x4d_c ++ ++unsigned int vpx_sad32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad32x64 vpx_sad32x64_c ++ ++unsigned int vpx_sad32x64_avg_c(const uint8_t* src_ptr, ++ int 
src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad32x64_avg vpx_sad32x64_avg_c ++ ++void vpx_sad32x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad32x64x4d vpx_sad32x64x4d_c ++ ++unsigned int vpx_sad4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad4x4 vpx_sad4x4_c ++ ++unsigned int vpx_sad4x4_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad4x4_avg vpx_sad4x4_avg_c ++ ++void vpx_sad4x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad4x4x4d vpx_sad4x4x4d_c ++ ++unsigned int vpx_sad4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad4x8 vpx_sad4x8_c ++ ++unsigned int vpx_sad4x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad4x8_avg vpx_sad4x8_avg_c ++ ++void vpx_sad4x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad4x8x4d vpx_sad4x8x4d_c ++ ++unsigned int vpx_sad64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad64x32 vpx_sad64x32_c ++ ++unsigned int vpx_sad64x32_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad64x32_avg vpx_sad64x32_avg_c ++ ++void vpx_sad64x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad64x32x4d vpx_sad64x32x4d_c ++ 
++unsigned int vpx_sad64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad64x64 vpx_sad64x64_c ++ ++unsigned int vpx_sad64x64_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad64x64_avg vpx_sad64x64_avg_c ++ ++void vpx_sad64x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad64x64x4d vpx_sad64x64x4d_c ++ ++unsigned int vpx_sad8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad8x16 vpx_sad8x16_c ++ ++unsigned int vpx_sad8x16_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad8x16_avg vpx_sad8x16_avg_c ++ ++void vpx_sad8x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad8x16x4d vpx_sad8x16x4d_c ++ ++unsigned int vpx_sad8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad8x4 vpx_sad8x4_c ++ ++unsigned int vpx_sad8x4_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad8x4_avg vpx_sad8x4_avg_c ++ ++void vpx_sad8x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad8x4x4d vpx_sad8x4x4d_c ++ ++unsigned int vpx_sad8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad8x8 vpx_sad8x8_c ++ ++unsigned int vpx_sad8x8_avg_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ const uint8_t* second_pred); ++#define vpx_sad8x8_avg 
vpx_sad8x8_avg_c ++ ++void vpx_sad8x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad8x8x4d vpx_sad8x8x4d_c ++ ++unsigned int vpx_sad_skip_16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_16x16 vpx_sad_skip_16x16_c ++ ++void vpx_sad_skip_16x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_16x16x4d vpx_sad_skip_16x16x4d_c ++ ++unsigned int vpx_sad_skip_16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_16x32 vpx_sad_skip_16x32_c ++ ++void vpx_sad_skip_16x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_16x32x4d vpx_sad_skip_16x32x4d_c ++ ++unsigned int vpx_sad_skip_16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_16x8 vpx_sad_skip_16x8_c ++ ++void vpx_sad_skip_16x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_16x8x4d vpx_sad_skip_16x8x4d_c ++ ++unsigned int vpx_sad_skip_32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_32x16 vpx_sad_skip_32x16_c ++ ++void vpx_sad_skip_32x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_32x16x4d vpx_sad_skip_32x16x4d_c ++ ++unsigned int vpx_sad_skip_32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_32x32 vpx_sad_skip_32x32_c ++ ++void 
vpx_sad_skip_32x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_32x32x4d vpx_sad_skip_32x32x4d_c ++ ++unsigned int vpx_sad_skip_32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_32x64 vpx_sad_skip_32x64_c ++ ++void vpx_sad_skip_32x64x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_32x64x4d vpx_sad_skip_32x64x4d_c ++ ++unsigned int vpx_sad_skip_4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_4x4 vpx_sad_skip_4x4_c ++ ++void vpx_sad_skip_4x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_4x4x4d vpx_sad_skip_4x4x4d_c ++ ++unsigned int vpx_sad_skip_4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_4x8 vpx_sad_skip_4x8_c ++ ++void vpx_sad_skip_4x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_4x8x4d vpx_sad_skip_4x8x4d_c ++ ++unsigned int vpx_sad_skip_64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_64x32 vpx_sad_skip_64x32_c ++ ++void vpx_sad_skip_64x32x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_64x32x4d vpx_sad_skip_64x32x4d_c ++ ++unsigned int vpx_sad_skip_64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_64x64 vpx_sad_skip_64x64_c ++ ++void vpx_sad_skip_64x64x4d_c(const uint8_t* 
src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_64x64x4d vpx_sad_skip_64x64x4d_c ++ ++unsigned int vpx_sad_skip_8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_8x16 vpx_sad_skip_8x16_c ++ ++void vpx_sad_skip_8x16x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_8x16x4d vpx_sad_skip_8x16x4d_c ++ ++unsigned int vpx_sad_skip_8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_8x4 vpx_sad_skip_8x4_c ++ ++void vpx_sad_skip_8x4x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_8x4x4d vpx_sad_skip_8x4x4d_c ++ ++unsigned int vpx_sad_skip_8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride); ++#define vpx_sad_skip_8x8 vpx_sad_skip_8x8_c ++ ++void vpx_sad_skip_8x8x4d_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* const ref_array[4], ++ int ref_stride, ++ uint32_t sad_array[4]); ++#define vpx_sad_skip_8x8x4d vpx_sad_skip_8x8x4d_c ++ ++int vpx_satd_c(const tran_low_t* coeff, int length); ++#define vpx_satd vpx_satd_c ++ ++void vpx_scaled_2d_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_2d vpx_scaled_2d_c ++ ++void vpx_scaled_avg_2d_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_avg_2d vpx_scaled_avg_2d_c ++ ++void 
vpx_scaled_avg_horiz_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_avg_horiz vpx_scaled_avg_horiz_c ++ ++void vpx_scaled_avg_vert_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_avg_vert vpx_scaled_avg_vert_c ++ ++void vpx_scaled_horiz_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_horiz vpx_scaled_horiz_c ++ ++void vpx_scaled_vert_c(const uint8_t* src, ++ ptrdiff_t src_stride, ++ uint8_t* dst, ++ ptrdiff_t dst_stride, ++ const InterpKernel* filter, ++ int x0_q4, ++ int x_step_q4, ++ int y0_q4, ++ int y_step_q4, ++ int w, ++ int h); ++#define vpx_scaled_vert vpx_scaled_vert_c ++ ++int64_t vpx_sse_c(const uint8_t* a, ++ int a_stride, ++ const uint8_t* b, ++ int b_stride, ++ int width, ++ int height); ++#define vpx_sse vpx_sse_c ++ ++uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_c ++ ++uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_c ++ ++uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ 
const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_c ++ ++uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_c ++ ++uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_c ++ ++uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_c ++ ++uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_c ++ ++uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_c ++ ++uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_c ++ ++uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* 
src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_c ++ ++uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_c ++ ++uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_c ++ ++uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse, ++ const uint8_t* second_pred); ++#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_c ++ ++uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_c ++ ++uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_c ++ ++uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_c ++ ++uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const 
uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_c ++ ++uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_c ++ ++uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_c ++ ++uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_c ++ ++uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_c ++ ++uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_c ++ ++uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_c ++ ++uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_c ++ ++uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int 
y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_c ++ ++uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ int x_offset, ++ int y_offset, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ uint32_t* sse); ++#define vpx_sub_pixel_variance8x8 vpx_sub_pixel_variance8x8_c ++ ++void vpx_subtract_block_c(int rows, ++ int cols, ++ int16_t* diff_ptr, ++ ptrdiff_t diff_stride, ++ const uint8_t* src_ptr, ++ ptrdiff_t src_stride, ++ const uint8_t* pred_ptr, ++ ptrdiff_t pred_stride); ++#define vpx_subtract_block vpx_subtract_block_c ++ ++uint64_t vpx_sum_squares_2d_i16_c(const int16_t* src, int stride, int size); ++#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_c ++ ++void vpx_tm_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_c ++ ++void vpx_tm_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_c ++ ++void vpx_tm_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_c ++ ++void vpx_tm_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_c ++ ++void vpx_v_predictor_16x16_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_c ++ ++void vpx_v_predictor_32x32_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_c ++ ++void vpx_v_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_v_predictor_4x4 
vpx_v_predictor_4x4_c ++ ++void vpx_v_predictor_8x8_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_c ++ ++unsigned int vpx_variance16x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance16x16 vpx_variance16x16_c ++ ++unsigned int vpx_variance16x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance16x32 vpx_variance16x32_c ++ ++unsigned int vpx_variance16x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance16x8 vpx_variance16x8_c ++ ++unsigned int vpx_variance32x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance32x16 vpx_variance32x16_c ++ ++unsigned int vpx_variance32x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance32x32 vpx_variance32x32_c ++ ++unsigned int vpx_variance32x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance32x64 vpx_variance32x64_c ++ ++unsigned int vpx_variance4x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance4x4 vpx_variance4x4_c ++ ++unsigned int vpx_variance4x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance4x8 vpx_variance4x8_c ++ ++unsigned int vpx_variance64x32_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance64x32 vpx_variance64x32_c ++ ++unsigned int 
vpx_variance64x64_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance64x64 vpx_variance64x64_c ++ ++unsigned int vpx_variance8x16_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance8x16 vpx_variance8x16_c ++ ++unsigned int vpx_variance8x4_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance8x4 vpx_variance8x4_c ++ ++unsigned int vpx_variance8x8_c(const uint8_t* src_ptr, ++ int src_stride, ++ const uint8_t* ref_ptr, ++ int ref_stride, ++ unsigned int* sse); ++#define vpx_variance8x8 vpx_variance8x8_c ++ ++void vpx_ve_predictor_4x4_c(uint8_t* dst, ++ ptrdiff_t stride, ++ const uint8_t* above, ++ const uint8_t* left); ++#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c ++ ++int vpx_vector_var_c(const int16_t* ref, const int16_t* src, const int bwl); ++#define vpx_vector_var vpx_vector_var_c ++ ++void vpx_dsp_rtcd(void); ++ ++#include "vpx_config.h" ++ ++#ifdef RTCD_C ++static void setup_rtcd_internal(void) {} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif +Index: chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vpx_scale_rtcd.h +=================================================================== +--- /dev/null ++++ chromium-121.0.6167.75/third_party/libvpx/source/config/linux/ppc64/vpx_scale_rtcd.h +@@ -0,0 +1,96 @@ ++// This file is generated. Do not edit. 
++#ifndef VPX_SCALE_RTCD_H_ ++#define VPX_SCALE_RTCD_H_ ++ ++#ifdef RTCD_C ++#define RTCD_EXTERN ++#else ++#define RTCD_EXTERN extern ++#endif ++ ++struct yv12_buffer_config; ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++void vp8_horizontal_line_2_1_scale_c(const unsigned char* source, ++ unsigned int source_width, ++ unsigned char* dest, ++ unsigned int dest_width); ++#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c ++ ++void vp8_horizontal_line_5_3_scale_c(const unsigned char* source, ++ unsigned int source_width, ++ unsigned char* dest, ++ unsigned int dest_width); ++#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c ++ ++void vp8_horizontal_line_5_4_scale_c(const unsigned char* source, ++ unsigned int source_width, ++ unsigned char* dest, ++ unsigned int dest_width); ++#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c ++ ++void vp8_vertical_band_2_1_scale_c(unsigned char* source, ++ unsigned int src_pitch, ++ unsigned char* dest, ++ unsigned int dest_pitch, ++ unsigned int dest_width); ++#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c ++ ++void vp8_vertical_band_2_1_scale_i_c(unsigned char* source, ++ unsigned int src_pitch, ++ unsigned char* dest, ++ unsigned int dest_pitch, ++ unsigned int dest_width); ++#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c ++ ++void vp8_vertical_band_5_3_scale_c(unsigned char* source, ++ unsigned int src_pitch, ++ unsigned char* dest, ++ unsigned int dest_pitch, ++ unsigned int dest_width); ++#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c ++ ++void vp8_vertical_band_5_4_scale_c(unsigned char* source, ++ unsigned int src_pitch, ++ unsigned char* dest, ++ unsigned int dest_pitch, ++ unsigned int dest_width); ++#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c ++ ++void vp8_yv12_copy_frame_c(const struct yv12_buffer_config* src_ybc, ++ struct yv12_buffer_config* dst_ybc); ++#define 
vp8_yv12_copy_frame vp8_yv12_copy_frame_c ++ ++void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config* ybf); ++#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c ++ ++void vpx_extend_frame_borders_c(struct yv12_buffer_config* ybf); ++#define vpx_extend_frame_borders vpx_extend_frame_borders_c ++ ++void vpx_extend_frame_inner_borders_c(struct yv12_buffer_config* ybf); ++#define vpx_extend_frame_inner_borders vpx_extend_frame_inner_borders_c ++ ++void vpx_yv12_copy_frame_c(const struct yv12_buffer_config* src_ybc, ++ struct yv12_buffer_config* dst_ybc); ++#define vpx_yv12_copy_frame vpx_yv12_copy_frame_c ++ ++void vpx_yv12_copy_y_c(const struct yv12_buffer_config* src_ybc, ++ struct yv12_buffer_config* dst_ybc); ++#define vpx_yv12_copy_y vpx_yv12_copy_y_c ++ ++void vpx_scale_rtcd(void); ++ ++#include "vpx_config.h" ++ ++#ifdef RTCD_C ++static void setup_rtcd_internal(void) {} ++#endif ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif ++ ++#endif diff --git a/0003-third_party-libvpx-Add-ppc64-vsx-files.patch b/0003-third_party-libvpx-Add-ppc64-vsx-files.patch new file mode 100644 index 0000000..80ecf75 --- /dev/null +++ b/0003-third_party-libvpx-Add-ppc64-vsx-files.patch @@ -0,0 +1,35 @@ +--- a/third_party/libvpx/libvpx_srcs.gni ++++ b/third_party/libvpx/libvpx_srcs.gni +@@ -4302,6 +4302,7 @@ + "//third_party/libvpx/source/libvpx/vp9/common/vp9_seg_common.c", + "//third_party/libvpx/source/libvpx/vp9/common/vp9_thread_common.c", + "//third_party/libvpx/source/libvpx/vp9/common/vp9_tile_common.c", ++ "//third_party/libvpx/source/libvpx/vp9/common/ppc/vp9_idct_vsx.c", + "//third_party/libvpx/source/libvpx/vp9/decoder/vp9_decodeframe.c", + "//third_party/libvpx/source/libvpx/vp9/decoder/vp9_decodemv.c", + "//third_party/libvpx/source/libvpx/vp9/decoder/vp9_decoder.c", +@@ -4340,6 +4341,7 @@ + "//third_party/libvpx/source/libvpx/vp9/encoder/vp9_svc_layercontext.c", + "//third_party/libvpx/source/libvpx/vp9/encoder/vp9_tokenize.c", + 
"//third_party/libvpx/source/libvpx/vp9/encoder/vp9_treewriter.c", ++ "//third_party/libvpx/source/libvpx/vp9/encoder/ppc/vp9_quantize_vsx.c", + "//third_party/libvpx/source/libvpx/vp9/vp9_cx_iface.c", + "//third_party/libvpx/source/libvpx/vp9/vp9_dx_iface.c", + "//third_party/libvpx/source/libvpx/vp9/vp9_iface_common.c", +@@ -4368,6 +4370,16 @@ + "//third_party/libvpx/source/libvpx/vpx_dsp/variance.c", + "//third_party/libvpx/source/libvpx/vpx_dsp/vpx_convolve.c", + "//third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp_rtcd.c", ++ "//third_party/libvpx/source/libvpx/vpx_dsp/ppc/deblock_vsx.c", ++ "//third_party/libvpx/source/libvpx/vpx_dsp/ppc/fdct32x32_vsx.c", ++ "//third_party/libvpx/source/libvpx/vpx_dsp/ppc/hadamard_vsx.c", ++ "//third_party/libvpx/source/libvpx/vpx_dsp/ppc/intrapred_vsx.c", ++ "//third_party/libvpx/source/libvpx/vpx_dsp/ppc/inv_txfm_vsx.c", ++ "//third_party/libvpx/source/libvpx/vpx_dsp/ppc/quantize_vsx.c", ++ "//third_party/libvpx/source/libvpx/vpx_dsp/ppc/sad_vsx.c", ++ "//third_party/libvpx/source/libvpx/vpx_dsp/ppc/subtract_vsx.c", ++ "//third_party/libvpx/source/libvpx/vpx_dsp/ppc/variance_vsx.c", ++ "//third_party/libvpx/source/libvpx/vpx_dsp/ppc/vpx_convolve_vsx.c", + "//third_party/libvpx/source/libvpx/vpx_mem/vpx_mem.c", + "//third_party/libvpx/source/libvpx/vpx_ports/ppc_cpudetect.c", + "//third_party/libvpx/source/libvpx/vpx_scale/generic/gen_scalers.c", diff --git a/0003-thirdparty-fix-dav1d-gn.patch b/0003-thirdparty-fix-dav1d-gn.patch new file mode 100644 index 0000000..9f65f9f --- /dev/null +++ b/0003-thirdparty-fix-dav1d-gn.patch @@ -0,0 +1,43 @@ +Index: chromium-120.0.6099.71/third_party/dav1d/config/linux/ppc64/config.h +=================================================================== +--- /dev/null ++++ chromium-120.0.6099.71/third_party/dav1d/config/linux/ppc64/config.h +@@ -0,0 +1,38 @@ ++/* ++ * Autogenerated by the Meson build system. ++ * Do not edit, your changes will be lost. 
++ */ ++ ++#pragma once ++ ++#define ARCH_AARCH64 0 ++ ++#define ARCH_ARM 0 ++ ++#define ARCH_PPC64LE 1 ++ ++#define ARCH_X86 0 ++ ++#define ARCH_X86_32 0 ++ ++#define ARCH_X86_64 0 ++ ++#define CONFIG_16BPC 1 ++ ++#define CONFIG_8BPC 1 ++ ++// #define CONFIG_LOG 1 -- Logging is controlled by Chromium ++ ++#define ENDIANNESS_BIG 0 ++ ++#define HAVE_ASM 1 ++ ++#define HAVE_CLOCK_GETTIME 1 ++ ++#define HAVE_DLSYM 1 ++ ++#define HAVE_GETAUXVAL 1 ++ ++#define HAVE_POSIX_MEMALIGN 1 ++ ++#define HAVE_UNISTD_H 1 diff --git a/0004-sandbox-linux-system_headers-Update-linux-signal-hea.patch b/0004-sandbox-linux-system_headers-Update-linux-signal-hea.patch new file mode 100644 index 0000000..117fe97 --- /dev/null +++ b/0004-sandbox-linux-system_headers-Update-linux-signal-hea.patch @@ -0,0 +1,23 @@ +From 298df3dc44f7121cd8cb9a06b29fa3b16c959b8d Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Thu, 9 Aug 2018 19:13:25 -0500 +Subject: [PATCH 4/4] sandbox/linux/system_headers: Update linux signal header + for ppc64 + +--- + sandbox/linux/system_headers/linux_signal.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: chromium-120.0.6099.71/sandbox/linux/system_headers/linux_signal.h +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/system_headers/linux_signal.h ++++ chromium-120.0.6099.71/sandbox/linux/system_headers/linux_signal.h +@@ -13,7 +13,7 @@ + // (not undefined, but defined different values and in different memory + // layouts). So, fill the gap here. 
+ #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + + #define LINUX_SIGHUP 1 + #define LINUX_SIGINT 2 diff --git a/0004-third_party-crashpad-port-curl-transport-ppc64.patch b/0004-third_party-crashpad-port-curl-transport-ppc64.patch new file mode 100644 index 0000000..11d54b7 --- /dev/null +++ b/0004-third_party-crashpad-port-curl-transport-ppc64.patch @@ -0,0 +1,17 @@ +Index: chromium-120.0.6099.71/third_party/crashpad/crashpad/util/net/http_transport_libcurl.cc +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/crashpad/crashpad/util/net/http_transport_libcurl.cc ++++ chromium-120.0.6099.71/third_party/crashpad/crashpad/util/net/http_transport_libcurl.cc +@@ -239,6 +239,12 @@ std::string UserAgent() { + #endif + #elif defined (ARCH_CPU_RISCV64) + static constexpr char arch[] = "riscv64"; ++#elif defined(__powerpc64__) ++#if defined(ARCH_CPU_LITTLE_ENDIAN) ++ static constexpr char arch[] = "ppc64"; ++#elif defined(ARCH_CPU_BIG_ENDIAN) ++ static constexpr char arch[] = "ppc64_be"; ++#endif + #else + #error Port + #endif diff --git a/0004-third_party-libvpx-work-around-ambiguous-vsx.patch b/0004-third_party-libvpx-work-around-ambiguous-vsx.patch new file mode 100644 index 0000000..1dd35ff --- /dev/null +++ b/0004-third_party-libvpx-work-around-ambiguous-vsx.patch @@ -0,0 +1,376 @@ +Index: chromium-120.0.6099.71/third_party/libvpx/source/libvpx/vp9/encoder/ppc/vp9_quantize_vsx.c +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/libvpx/source/libvpx/vp9/encoder/ppc/vp9_quantize_vsx.c ++++ chromium-120.0.6099.71/third_party/libvpx/source/libvpx/vp9/encoder/ppc/vp9_quantize_vsx.c +@@ -38,6 +38,28 @@ static INLINE int16x8_t vec_max_across(i + return vec_max(a, vec_perm(a, a, vec_perm16)); + } + ++static INLINE void ++vec_u64_store(vector unsigned long 
long vecu64, unsigned long offset, void *ptr) ++{ ++#ifndef WORDS_BIGENDIAN ++ __asm__ ("xxswapd %x0, %x1" ++ : "=wa" (vecu64) ++ : "wa" (vecu64)); ++#endif ++#if __GNUC__ >= 4 ++ if (__builtin_constant_p (offset) && offset == 0) ++ __asm__ ("stxvd2x %x0,0,%1\n\t" ++ : ++ : "wa" (vecu64), "r" ((uintptr_t)ptr) ++ : "memory"); ++ else ++#endif ++ __asm__ ("stxvd2x %x0,%1,%2\n\t" ++ : ++ : "wa" (vecu64), "r" (offset), "r" ((uintptr_t)ptr) ++ : "memory", "r0"); ++} ++ + void vp9_quantize_fp_vsx(const tran_low_t *coeff_ptr, intptr_t n_coeffs, + const int16_t *round_ptr, const int16_t *quant_ptr, + tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, +@@ -60,10 +82,10 @@ void vp9_quantize_fp_vsx(const tran_low_ + qcoeff0 = vec_mulhi(vec_vaddshs(vec_abs(coeff0), round), quant); + zero_coeff0 = vec_cmpeq(qcoeff0, vec_zeros_s16); + qcoeff0 = vec_sign(qcoeff0, coeff0); +- vec_vsx_st(qcoeff0, 0, qcoeff_ptr); ++ vec_u64_store(qcoeff0, 0, qcoeff_ptr); + + dqcoeff0 = vec_mladd(qcoeff0, dequant, vec_zeros_s16); +- vec_vsx_st(dqcoeff0, 0, dqcoeff_ptr); ++ vec_u64_store(dqcoeff0, 0, dqcoeff_ptr); + + // Remove DC value from round and quant + round = vec_splat(round, 1); +@@ -76,10 +98,10 @@ void vp9_quantize_fp_vsx(const tran_low_ + qcoeff1 = vec_mulhi(vec_vaddshs(vec_abs(coeff1), round), quant); + zero_coeff1 = vec_cmpeq(qcoeff1, vec_zeros_s16); + qcoeff1 = vec_sign(qcoeff1, coeff1); +- vec_vsx_st(qcoeff1, 16, qcoeff_ptr); ++ vec_u64_store(qcoeff1, 16, qcoeff_ptr); + + dqcoeff1 = vec_mladd(qcoeff1, dequant, vec_zeros_s16); +- vec_vsx_st(dqcoeff1, 16, dqcoeff_ptr); ++ vec_u64_store(dqcoeff1, 16, dqcoeff_ptr); + + eob = vec_max(vec_or(scan0, zero_coeff0), vec_or(scan1, zero_coeff1)); + +@@ -107,23 +129,23 @@ void vp9_quantize_fp_vsx(const tran_low_ + qcoeff0 = vec_mulhi(vec_vaddshs(vec_abs(coeff0), round), quant); + zero_coeff0 = vec_cmpeq(qcoeff0, vec_zeros_s16); + qcoeff0 = vec_sign(qcoeff0, coeff0); +- vec_vsx_st(qcoeff0, off0, qcoeff_ptr); ++ vec_u64_store(qcoeff0, off0, 
qcoeff_ptr); + dqcoeff0 = vec_mladd(qcoeff0, dequant, vec_zeros_s16); +- vec_vsx_st(dqcoeff0, off0, dqcoeff_ptr); ++ vec_u64_store(dqcoeff0, off0, dqcoeff_ptr); + + qcoeff1 = vec_mulhi(vec_vaddshs(vec_abs(coeff1), round), quant); + zero_coeff1 = vec_cmpeq(qcoeff1, vec_zeros_s16); + qcoeff1 = vec_sign(qcoeff1, coeff1); +- vec_vsx_st(qcoeff1, off1, qcoeff_ptr); ++ vec_u64_store(qcoeff1, off1, qcoeff_ptr); + dqcoeff1 = vec_mladd(qcoeff1, dequant, vec_zeros_s16); +- vec_vsx_st(dqcoeff1, off1, dqcoeff_ptr); ++ vec_u64_store(dqcoeff1, off1, dqcoeff_ptr); + + qcoeff2 = vec_mulhi(vec_vaddshs(vec_abs(coeff2), round), quant); + zero_coeff2 = vec_cmpeq(qcoeff2, vec_zeros_s16); + qcoeff2 = vec_sign(qcoeff2, coeff2); +- vec_vsx_st(qcoeff2, off2, qcoeff_ptr); ++ vec_u64_store(qcoeff2, off2, qcoeff_ptr); + dqcoeff2 = vec_mladd(qcoeff2, dequant, vec_zeros_s16); +- vec_vsx_st(dqcoeff2, off2, dqcoeff_ptr); ++ vec_u64_store(dqcoeff2, off2, dqcoeff_ptr); + + eob = vec_max(eob, vec_or(scan0, zero_coeff0)); + eob2 = vec_max(vec_or(scan1, zero_coeff1), vec_or(scan2, zero_coeff2)); +@@ -200,10 +222,10 @@ void vp9_quantize_fp_32x32_vsx(const tra + qcoeff0 = vec_and(qcoeff0, mask0); + zero_coeff0 = vec_cmpeq(qcoeff0, vec_zeros_s16); + qcoeff0 = vec_sign(qcoeff0, coeff0); +- vec_vsx_st(qcoeff0, 0, qcoeff_ptr); ++ vec_u64_store(qcoeff0, 0, qcoeff_ptr); + + dqcoeff0 = dequantize_coeff_32(qcoeff0, dequant); +- vec_vsx_st(dqcoeff0, 0, dqcoeff_ptr); ++ vec_u64_store(dqcoeff0, 0, dqcoeff_ptr); + + // Remove DC value from thres, round, quant and dequant + thres = vec_splat(thres, 1); +@@ -219,10 +241,10 @@ void vp9_quantize_fp_32x32_vsx(const tra + qcoeff1 = vec_and(qcoeff1, mask1); + zero_coeff1 = vec_cmpeq(qcoeff1, vec_zeros_s16); + qcoeff1 = vec_sign(qcoeff1, coeff1); +- vec_vsx_st(qcoeff1, 16, qcoeff_ptr); ++ vec_u64_store(qcoeff1, 16, qcoeff_ptr); + + dqcoeff1 = dequantize_coeff_32(qcoeff1, dequant); +- vec_vsx_st(dqcoeff1, 16, dqcoeff_ptr); ++ vec_u64_store(dqcoeff1, 16, dqcoeff_ptr); + + eob 
= vec_max(vec_or(scan0, zero_coeff0), vec_or(scan1, zero_coeff1)); + +@@ -260,17 +282,17 @@ void vp9_quantize_fp_32x32_vsx(const tra + qcoeff1 = vec_sign(qcoeff1, coeff1); + qcoeff2 = vec_sign(qcoeff2, coeff2); + +- vec_vsx_st(qcoeff0, off0, qcoeff_ptr); +- vec_vsx_st(qcoeff1, off1, qcoeff_ptr); +- vec_vsx_st(qcoeff2, off2, qcoeff_ptr); ++ vec_u64_store(qcoeff0, off0, qcoeff_ptr); ++ vec_u64_store(qcoeff1, off1, qcoeff_ptr); ++ vec_u64_store(qcoeff2, off2, qcoeff_ptr); + + dqcoeff0 = dequantize_coeff_32(qcoeff0, dequant); + dqcoeff1 = dequantize_coeff_32(qcoeff1, dequant); + dqcoeff2 = dequantize_coeff_32(qcoeff2, dequant); + +- vec_vsx_st(dqcoeff0, off0, dqcoeff_ptr); +- vec_vsx_st(dqcoeff1, off1, dqcoeff_ptr); +- vec_vsx_st(dqcoeff2, off2, dqcoeff_ptr); ++ vec_u64_store(dqcoeff0, off0, dqcoeff_ptr); ++ vec_u64_store(dqcoeff1, off1, dqcoeff_ptr); ++ vec_u64_store(dqcoeff2, off2, dqcoeff_ptr); + + eob = vec_max(eob, vec_or(scan0, zero_coeff0)); + eob2 = vec_max(vec_or(scan1, zero_coeff1), vec_or(scan2, zero_coeff2)); +Index: chromium-120.0.6099.71/third_party/libvpx/source/libvpx/vpx_dsp/ppc/fdct32x32_vsx.c +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/libvpx/source/libvpx/vpx_dsp/ppc/fdct32x32_vsx.c ++++ chromium-120.0.6099.71/third_party/libvpx/source/libvpx/vpx_dsp/ppc/fdct32x32_vsx.c +@@ -15,6 +15,28 @@ + #include "vpx_dsp/ppc/txfm_common_vsx.h" + #include "vpx_dsp/ppc/types_vsx.h" + ++static INLINE void ++vec_u64_store(vector unsigned long long vecu64, unsigned long offset, void *ptr) ++{ ++#ifndef WORDS_BIGENDIAN ++ __asm__ ("xxswapd %x0, %x1" ++ : "=wa" (vecu64) ++ : "wa" (vecu64)); ++#endif ++#if __GNUC__ >= 4 ++ if (__builtin_constant_p (offset) && offset == 0) ++ __asm__ ("stxvd2x %x0,0,%1\n\t" ++ : ++ : "wa" (vecu64), "r" ((uintptr_t)ptr) ++ : "memory"); ++ else ++#endif ++ __asm__ ("stxvd2x %x0,%1,%2\n\t" ++ : ++ : "wa" (vecu64), "r" (offset), "r" ((uintptr_t)ptr) ++ : "memory", "r0"); 
++} ++ + // Returns ((a +/- b) * cospi16 + (2 << 13)) >> 14. + static INLINE void single_butterfly(int16x8_t a, int16x8_t b, int16x8_t *add, + int16x8_t *sub) { +@@ -164,45 +186,45 @@ static INLINE void load(const int16_t *a + } + + static INLINE void store(tran_low_t *a, const int16x8_t *b) { +- vec_vsx_st(b[0], 0, a); +- vec_vsx_st(b[8], 0, a + 8); +- vec_vsx_st(b[16], 0, a + 16); +- vec_vsx_st(b[24], 0, a + 24); +- +- vec_vsx_st(b[1], 0, a + 32); +- vec_vsx_st(b[9], 0, a + 40); +- vec_vsx_st(b[17], 0, a + 48); +- vec_vsx_st(b[25], 0, a + 56); +- +- vec_vsx_st(b[2], 0, a + 64); +- vec_vsx_st(b[10], 0, a + 72); +- vec_vsx_st(b[18], 0, a + 80); +- vec_vsx_st(b[26], 0, a + 88); +- +- vec_vsx_st(b[3], 0, a + 96); +- vec_vsx_st(b[11], 0, a + 104); +- vec_vsx_st(b[19], 0, a + 112); +- vec_vsx_st(b[27], 0, a + 120); +- +- vec_vsx_st(b[4], 0, a + 128); +- vec_vsx_st(b[12], 0, a + 136); +- vec_vsx_st(b[20], 0, a + 144); +- vec_vsx_st(b[28], 0, a + 152); +- +- vec_vsx_st(b[5], 0, a + 160); +- vec_vsx_st(b[13], 0, a + 168); +- vec_vsx_st(b[21], 0, a + 176); +- vec_vsx_st(b[29], 0, a + 184); +- +- vec_vsx_st(b[6], 0, a + 192); +- vec_vsx_st(b[14], 0, a + 200); +- vec_vsx_st(b[22], 0, a + 208); +- vec_vsx_st(b[30], 0, a + 216); +- +- vec_vsx_st(b[7], 0, a + 224); +- vec_vsx_st(b[15], 0, a + 232); +- vec_vsx_st(b[23], 0, a + 240); +- vec_vsx_st(b[31], 0, a + 248); ++ vec_u64_store(b[0], 0, a); ++ vec_u64_store(b[8], 0, a + 8); ++ vec_u64_store(b[16], 0, a + 16); ++ vec_u64_store(b[24], 0, a + 24); ++ ++ vec_u64_store(b[1], 0, a + 32); ++ vec_u64_store(b[9], 0, a + 40); ++ vec_u64_store(b[17], 0, a + 48); ++ vec_u64_store(b[25], 0, a + 56); ++ ++ vec_u64_store(b[2], 0, a + 64); ++ vec_u64_store(b[10], 0, a + 72); ++ vec_u64_store(b[18], 0, a + 80); ++ vec_u64_store(b[26], 0, a + 88); ++ ++ vec_u64_store(b[3], 0, a + 96); ++ vec_u64_store(b[11], 0, a + 104); ++ vec_u64_store(b[19], 0, a + 112); ++ vec_u64_store(b[27], 0, a + 120); ++ ++ vec_u64_store(b[4], 0, a + 128); ++ 
vec_u64_store(b[12], 0, a + 136); ++ vec_u64_store(b[20], 0, a + 144); ++ vec_u64_store(b[28], 0, a + 152); ++ ++ vec_u64_store(b[5], 0, a + 160); ++ vec_u64_store(b[13], 0, a + 168); ++ vec_u64_store(b[21], 0, a + 176); ++ vec_u64_store(b[29], 0, a + 184); ++ ++ vec_u64_store(b[6], 0, a + 192); ++ vec_u64_store(b[14], 0, a + 200); ++ vec_u64_store(b[22], 0, a + 208); ++ vec_u64_store(b[30], 0, a + 216); ++ ++ vec_u64_store(b[7], 0, a + 224); ++ vec_u64_store(b[15], 0, a + 232); ++ vec_u64_store(b[23], 0, a + 240); ++ vec_u64_store(b[31], 0, a + 248); + } + + // Returns 1 if negative 0 if positive +Index: chromium-120.0.6099.71/third_party/libvpx/source/libvpx/vpx_dsp/ppc/quantize_vsx.c +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/libvpx/source/libvpx/vpx_dsp/ppc/quantize_vsx.c ++++ chromium-120.0.6099.71/third_party/libvpx/source/libvpx/vpx_dsp/ppc/quantize_vsx.c +@@ -13,6 +13,28 @@ + #include "./vpx_dsp_rtcd.h" + #include "vpx_dsp/ppc/types_vsx.h" + ++static INLINE void ++vec_u64_store(vector unsigned long long vecu64, unsigned long offset, void *ptr) ++{ ++#ifndef WORDS_BIGENDIAN ++ __asm__ ("xxswapd %x0, %x1" ++ : "=wa" (vecu64) ++ : "wa" (vecu64)); ++#endif ++#if __GNUC__ >= 4 ++ if (__builtin_constant_p (offset) && offset == 0) ++ __asm__ ("stxvd2x %x0,0,%1\n\t" ++ : ++ : "wa" (vecu64), "r" ((uintptr_t)ptr) ++ : "memory"); ++ else ++#endif ++ __asm__ ("stxvd2x %x0,%1,%2\n\t" ++ : ++ : "wa" (vecu64), "r" (offset), "r" ((uintptr_t)ptr) ++ : "memory", "r0"); ++} ++ + // Negate 16-bit integers in a when the corresponding signed 16-bit + // integer in b is negative. 
+ static INLINE int16x8_t vec_sign(int16x8_t a, int16x8_t b) { +@@ -124,19 +146,19 @@ void vpx_quantize_b_vsx(const tran_low_t + + qcoeff0 = + quantize_coeff(coeff0, coeff0_abs, round, quant, quant_shift, zero_mask0); +- vec_vsx_st(qcoeff0, 0, qcoeff_ptr); ++ vec_u64_store(qcoeff0, 0, qcoeff_ptr); + round = vec_splat(round, 1); + quant = vec_splat(quant, 1); + quant_shift = vec_splat(quant_shift, 1); + qcoeff1 = + quantize_coeff(coeff1, coeff1_abs, round, quant, quant_shift, zero_mask1); +- vec_vsx_st(qcoeff1, 16, qcoeff_ptr); ++ vec_u64_store(qcoeff1, 16, qcoeff_ptr); + + dqcoeff0 = vec_mladd(qcoeff0, dequant, vec_zeros_s16); +- vec_vsx_st(dqcoeff0, 0, dqcoeff_ptr); ++ vec_u64_store(dqcoeff0, 0, dqcoeff_ptr); + dequant = vec_splat(dequant, 1); + dqcoeff1 = vec_mladd(qcoeff1, dequant, vec_zeros_s16); +- vec_vsx_st(dqcoeff1, 16, dqcoeff_ptr); ++ vec_u64_store(dqcoeff1, 16, dqcoeff_ptr); + + eob = vec_max(nonzero_scanindex(qcoeff0, iscan_ptr, 0), + nonzero_scanindex(qcoeff1, iscan_ptr, 16)); +@@ -164,17 +186,17 @@ void vpx_quantize_b_vsx(const tran_low_t + zero_mask1); + qcoeff2 = quantize_coeff(coeff2, coeff2_abs, round, quant, quant_shift, + zero_mask2); +- vec_vsx_st(qcoeff0, off0, qcoeff_ptr); +- vec_vsx_st(qcoeff1, off1, qcoeff_ptr); +- vec_vsx_st(qcoeff2, off2, qcoeff_ptr); ++ vec_u64_store(qcoeff0, off0, qcoeff_ptr); ++ vec_u64_store(qcoeff1, off1, qcoeff_ptr); ++ vec_u64_store(qcoeff2, off2, qcoeff_ptr); + + dqcoeff0 = vec_mladd(qcoeff0, dequant, vec_zeros_s16); + dqcoeff1 = vec_mladd(qcoeff1, dequant, vec_zeros_s16); + dqcoeff2 = vec_mladd(qcoeff2, dequant, vec_zeros_s16); + +- vec_vsx_st(dqcoeff0, off0, dqcoeff_ptr); +- vec_vsx_st(dqcoeff1, off1, dqcoeff_ptr); +- vec_vsx_st(dqcoeff2, off2, dqcoeff_ptr); ++ vec_u64_store(dqcoeff0, off0, dqcoeff_ptr); ++ vec_u64_store(dqcoeff1, off1, dqcoeff_ptr); ++ vec_u64_store(dqcoeff2, off2, dqcoeff_ptr); + + eob = vec_max(eob, nonzero_scanindex(qcoeff0, iscan_ptr, off0)); + eob2 = vec_max(nonzero_scanindex(qcoeff1, 
iscan_ptr, off1), +@@ -243,12 +265,12 @@ void vpx_quantize_b_32x32_vsx(const tran + qcoeff1 = quantize_coeff_32(coeff1, coeff1_abs, round, quant, quant_shift, + zero_mask1); + +- vec_vsx_st(qcoeff0, 0, qcoeff_ptr); +- vec_vsx_st(qcoeff1, 16, qcoeff_ptr); ++ vec_u64_store(qcoeff0, 0, qcoeff_ptr); ++ vec_u64_store(qcoeff1, 16, qcoeff_ptr); + +- vec_vsx_st(dequantize_coeff_32(qcoeff0, dequant), 0, dqcoeff_ptr); ++ vec_u64_store(dequantize_coeff_32(qcoeff0, dequant), 0, dqcoeff_ptr); + dequant = vec_splat(dequant, 1); // remove DC from dequant +- vec_vsx_st(dequantize_coeff_32(qcoeff1, dequant), 16, dqcoeff_ptr); ++ vec_u64_store(dequantize_coeff_32(qcoeff1, dequant), 16, dqcoeff_ptr); + + eob = vec_max(nonzero_scanindex(qcoeff0, iscan_ptr, 0), + nonzero_scanindex(qcoeff1, iscan_ptr, 16)); +@@ -276,13 +298,13 @@ void vpx_quantize_b_32x32_vsx(const tran + qcoeff2 = quantize_coeff_32(coeff2, coeff2_abs, round, quant, quant_shift, + zero_mask2); + +- vec_vsx_st(qcoeff0, off0, qcoeff_ptr); +- vec_vsx_st(qcoeff1, off1, qcoeff_ptr); +- vec_vsx_st(qcoeff2, off2, qcoeff_ptr); +- +- vec_vsx_st(dequantize_coeff_32(qcoeff0, dequant), off0, dqcoeff_ptr); +- vec_vsx_st(dequantize_coeff_32(qcoeff1, dequant), off1, dqcoeff_ptr); +- vec_vsx_st(dequantize_coeff_32(qcoeff2, dequant), off2, dqcoeff_ptr); ++ vec_u64_store(qcoeff0, off0, qcoeff_ptr); ++ vec_u64_store(qcoeff1, off1, qcoeff_ptr); ++ vec_u64_store(qcoeff2, off2, qcoeff_ptr); ++ ++ vec_u64_store(dequantize_coeff_32(qcoeff0, dequant), off0, dqcoeff_ptr); ++ vec_u64_store(dequantize_coeff_32(qcoeff1, dequant), off1, dqcoeff_ptr); ++ vec_u64_store(dequantize_coeff_32(qcoeff2, dequant), off2, dqcoeff_ptr); + + eob = vec_max(eob, nonzero_scanindex(qcoeff0, iscan_ptr, off0)); + eob2 = vec_max(nonzero_scanindex(qcoeff1, iscan_ptr, off1), diff --git a/0005-sandbox-linux-seccomp-bpf-Add-ppc64-syscall-stub.patch b/0005-sandbox-linux-seccomp-bpf-Add-ppc64-syscall-stub.patch new file mode 100644 index 0000000..5a2241d --- /dev/null +++ 
b/0005-sandbox-linux-seccomp-bpf-Add-ppc64-syscall-stub.patch @@ -0,0 +1,107 @@ +From 1d44643a7c7cf650efd1093d22cd5bf859fdcb51 Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Thu, 9 Aug 2018 20:52:13 -0500 +Subject: [PATCH] sandbox/linux/seccomp-bpf: Add ppc64 syscall stub + +--- + sandbox/linux/seccomp-bpf/syscall.cc | 53 ++++++++++++++++++++++++++-- + 1 file changed, 51 insertions(+), 2 deletions(-) + +Index: chromium-120.0.6099.71/sandbox/linux/seccomp-bpf/syscall.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/seccomp-bpf/syscall.cc ++++ chromium-120.0.6099.71/sandbox/linux/seccomp-bpf/syscall.cc +@@ -18,7 +18,7 @@ namespace sandbox { + namespace { + + #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \ +- defined(ARCH_CPU_MIPS_FAMILY) ++ defined(ARCH_CPU_MIPS_FAMILY) || defined (ARCH_CPU_PPC64_FAMILY) + // Number that's not currently used by any Linux kernel ABIs. + const int kInvalidSyscallNumber = 0x351d3; + #else +@@ -308,10 +308,54 @@ asm(// We need to be able to tell the ke + "2:ret\n" + ".cfi_endproc\n" + ".size SyscallAsm, .-SyscallAsm\n" ++#elif defined(__powerpc64__) ++ ".text\n" ++ ".align 4\n" ++ ".type SyscallAsm @function\n" ++ "SyscallAsm:\n" ++ ".cfi_startproc\n" ++ ++ // Check if r3 is negative ++ "cmpdi 3, 0\n" ++ "bgt 2f\n" ++ ++ // Load address of 3f into r3 and return ++ "mflr 10\n" ++ "bl 1f\n" ++ "1: mflr 3\n" ++ "mtlr 10\n" ++ "addi 3, 3, 4*13\n" ++ "blr\n" ++ ++ // Load arguments from array into r3-8 ++ // save param 3 in r10 ++ "2:\n" ++ "mr 0, 3\n" ++ "ld 3, 0(4)\n" ++ "ld 5, 16(4)\n" ++ "ld 6, 24(4)\n" ++ "ld 7, 32(4)\n" ++ "ld 8, 40(4)\n" ++ "ld 4, 8(4)\n" ++ "li 9, 0\n" ++ ++ // Enter kernel ++ "sc\n" ++ ++ // Magic return address ++ "3:\n" ++ // Like MIPS, ppc64 return values are always positive. 
++ // Check for error in cr0.SO and negate upon error ++ "bc 4, 3, 4f\n" ++ "neg 3, 3\n" ++ "4: blr\n" ++ ++ ".cfi_endproc\n" ++ ".size SyscallAsm, .-SyscallAsm\n" + #endif + ); // asm + +-#if defined(__x86_64__) ++#if defined(__x86_64__) || defined(__powerpc64__) + extern "C" { + intptr_t SyscallAsm(intptr_t nr, const intptr_t args[6]); + } +@@ -425,6 +469,8 @@ intptr_t Syscall::Call(int nr, + ret = inout; + } + ++#elif defined(__powerpc64__) ++ intptr_t ret = SyscallAsm(nr, args); + #else + #error "Unimplemented architecture" + #endif +@@ -441,8 +487,18 @@ void Syscall::PutValueInUcontext(intptr_ + // needs to be changed back. + ret_val = -ret_val; + SECCOMP_PARM4(ctx) = 1; +- } else ++ } else { + SECCOMP_PARM4(ctx) = 0; ++ } ++#endif ++#if defined(__powerpc64__) ++ // Same as MIPS, need to invert ret and set error register (cr0.SO) ++ if (ret_val <= -1 && ret_val >= -4095) { ++ ret_val = -ret_val; ++ ctx->uc_mcontext.regs->ccr |= (1 << 28); ++ } else { ++ ctx->uc_mcontext.regs->ccr &= ~(1 << 28); ++ } + #endif + SECCOMP_RESULT(ctx) = static_cast(ret_val); + } diff --git a/0005-sandbox-linux-update-unit-test-for-ppc64.patch b/0005-sandbox-linux-update-unit-test-for-ppc64.patch new file mode 100644 index 0000000..ee20e07 --- /dev/null +++ b/0005-sandbox-linux-update-unit-test-for-ppc64.patch @@ -0,0 +1,24 @@ +From 6a852c4135864ba87b3cbdd0880d7cfecf7cd654 Mon Sep 17 00:00:00 2001 +From: Shawn Anastasio +Date: Thu, 13 Sep 2018 15:12:22 -0500 +Subject: [PATCH 5/6] sandbox/linux: update unit test for ppc64 + +--- + sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc ++++ 
chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc +@@ -331,8 +331,10 @@ TEST_BASELINE_SIGSYS(__NR_timer_create) + + #if !defined(__aarch64__) + TEST_BASELINE_SIGSYS(__NR_inotify_init) ++#if !defined(__powerpc64__) + TEST_BASELINE_SIGSYS(__NR_vserver) + #endif ++#endif + + #if defined(LIBC_GLIBC) && !BUILDFLAG(IS_CHROMEOS_ASH) + BPF_TEST_C(BaselinePolicy, FutexEINVAL, BaselinePolicy) { diff --git a/0006-sandbox-linux-disable-timedwait-time64-ppc64.patch b/0006-sandbox-linux-disable-timedwait-time64-ppc64.patch new file mode 100644 index 0000000..dae8134 --- /dev/null +++ b/0006-sandbox-linux-disable-timedwait-time64-ppc64.patch @@ -0,0 +1,14 @@ +Index: chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc ++++ chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc +@@ -374,7 +374,9 @@ bool SyscallSets::IsAllowedSignalHandlin + #if defined(__i386__) || defined(__arm__) || \ + (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS)) || \ + defined(__powerpc64__) ++#if !defined(__powerpc64__) + case __NR_rt_sigtimedwait_time64: ++#endif + case __NR_sigaction: + case __NR_sigprocmask: + case __NR_sigreturn: diff --git a/0007-sandbox-linux-add-ppc64-stat.patch b/0007-sandbox-linux-add-ppc64-stat.patch new file mode 100644 index 0000000..abad1b5 --- /dev/null +++ b/0007-sandbox-linux-add-ppc64-stat.patch @@ -0,0 +1,33 @@ +Index: chromium-120.0.6099.71/sandbox/linux/system_headers/linux_stat.h +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/system_headers/linux_stat.h ++++ chromium-120.0.6099.71/sandbox/linux/system_headers/linux_stat.h +@@ -173,6 +173,28 @@ struct kernel_stat { + unsigned int __unused4; + unsigned int __unused5; + }; ++#elif defined(__powerpc64__) ++struct 
kernel_stat { ++ unsigned long st_dev; ++ ino_t st_ino; ++ unsigned long st_nlink; ++ mode_t st_mode; ++ uid_t st_uid; ++ gid_t st_gid; ++ unsigned long st_rdev; ++ long st_size; ++ unsigned long st_blksize; ++ unsigned long st_blocks; ++ // unsigned long st_atime; ++ unsigned long st_atime_nsec; ++ //unsigned long st_mtime; ++ unsigned long st_mtime_nsec; ++ //unsigned long st_ctime; ++ unsigned long st_ctime_nsec; ++ unsigned long __unused4; ++ unsigned long __unused5; ++ unsigned long __unused6; ++}; + #endif + + #if !defined(AT_EMPTY_PATH) diff --git a/0008-sandbox-fix-ppc64le-glibc234.patch b/0008-sandbox-fix-ppc64le-glibc234.patch new file mode 100644 index 0000000..185aefc --- /dev/null +++ b/0008-sandbox-fix-ppc64le-glibc234.patch @@ -0,0 +1,91 @@ +Index: chromium-120.0.6099.71/sandbox/policy/linux/bpf_utility_policy_linux.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/policy/linux/bpf_utility_policy_linux.cc ++++ chromium-120.0.6099.71/sandbox/policy/linux/bpf_utility_policy_linux.cc +@@ -34,7 +34,7 @@ ResultExpr UtilityProcessPolicy::Evaluat + case __NR_fdatasync: + case __NR_fsync: + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + case __NR_getrlimit: + #endif + #if defined(__i386__) || defined(__arm__) +Index: chromium-120.0.6099.71/sandbox/policy/linux/bpf_renderer_policy_linux.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/policy/linux/bpf_renderer_policy_linux.cc ++++ chromium-120.0.6099.71/sandbox/policy/linux/bpf_renderer_policy_linux.cc +@@ -87,7 +87,7 @@ ResultExpr RendererProcessPolicy::Evalua + case __NR_ftruncate64: + #endif + #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \ +- defined(__aarch64__) ++ defined(__aarch64__) || defined(__powerpc64__) + case __NR_getrlimit: + case __NR_setrlimit: + 
// We allow setrlimit to dynamically adjust the address space limit as +Index: chromium-120.0.6099.71/sandbox/linux/bpf_dsl/linux_syscall_ranges.h +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/bpf_dsl/linux_syscall_ranges.h ++++ chromium-120.0.6099.71/sandbox/linux/bpf_dsl/linux_syscall_ranges.h +@@ -58,9 +58,9 @@ + + #elif defined(__powerpc64__) + +-#include ++#include + #define MIN_SYSCALL 0u +-#define MAX_PUBLIC_SYSCALL 386u ++#define MAX_PUBLIC_SYSCALL __NR_syscalls + #define MAX_SYSCALL MAX_PUBLIC_SYSCALL + + #else +Index: chromium-120.0.6099.71/sandbox/linux/services/credentials.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/services/credentials.cc ++++ chromium-120.0.6099.71/sandbox/linux/services/credentials.cc +@@ -89,7 +89,9 @@ bool ChrootToSafeEmptyDir() { + + int clone_flags = CLONE_FS | LINUX_SIGCHLD; + void* tls = nullptr; +-#if (defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_ARM_FAMILY)) && \ ++// RAJA this might be it... ++#if (defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_ARM_FAMILY) || \ ++ defined(ARCH_CPU_PPC64_FAMILY)) && \ + !defined(MEMORY_SANITIZER) + // Use CLONE_VM | CLONE_VFORK as an optimization to avoid copying page tables. + // Since clone writes to the new child's TLS before returning, we must set a +@@ -97,6 +99,11 @@ bool ChrootToSafeEmptyDir() { + // glibc performs syscalls by calling a function pointer in TLS, so we do not + // attempt this optimization. + // TODO(crbug.com/1247458) Broken in MSan builds after LLVM f1bb30a4956f. ++ // ++ // NOTE: Without CLONE_VM, fontconfig will attempt to reload configuration ++ // in every thread. Since the rendered threads are sandboxed without ++ // filesystem access (e.g. to /etc/fonts/fonts.conf) this will cause font ++ // configuraiton loading failures and no fonts will be displayed! 
+ clone_flags |= CLONE_VM | CLONE_VFORK | CLONE_SETTLS; + + // PTHREAD_STACK_MIN can be dynamic in glibc2.34+, so it is not possible to +Index: chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc ++++ chromium-120.0.6099.71/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc +@@ -357,7 +357,16 @@ intptr_t SIGSYSFstatatHandler(const stru + if (args.nr == __NR_fstatat_default) { + if (*reinterpret_cast(args.args[1]) == '\0' && + args.args[3] == static_cast(AT_EMPTY_PATH)) { +- return syscall(__NR_fstat_default, static_cast(args.args[0]), ++ int fd = static_cast(args.args[0]); ++#if defined(__powerpc64__) ++ // On ppc64+glibc, some syscalls seem to accidentally negate the first ++ // parameter which causes checks against it to fail. For now, manually ++ // negate them back. ++ // TODO: Investigate the root cause and fix in glibc ++ if (fd < 0) ++ fd = -fd; ++#endif ++ return syscall(__NR_fstat_default, fd, + reinterpret_cast(args.args[2])); + } + return -reinterpret_cast(fs_denied_errno); diff --git a/HACK-debian-clang-disable-skia-musttail.patch b/HACK-debian-clang-disable-skia-musttail.patch new file mode 100644 index 0000000..6e25520 --- /dev/null +++ b/HACK-debian-clang-disable-skia-musttail.patch @@ -0,0 +1,13 @@ +Index: chromium-120.0.6099.71/third_party/skia/src/core/SkRasterPipeline.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/skia/src/core/SkRasterPipeline.h ++++ chromium-120.0.6099.71/third_party/skia/src/core/SkRasterPipeline.h +@@ -24,7 +24,7 @@ enum SkColorType : int; + struct SkImageInfo; + struct skcms_TransferFunction; + +-#if __has_cpp_attribute(clang::musttail) && !defined(__EMSCRIPTEN__) && !defined(SK_CPU_ARM32) ++#if __has_cpp_attribute(clang::musttail) && !defined(__EMSCRIPTEN__) && 
!defined(SK_CPU_ARM32) && !defined(__powerpc64__) + #define SK_HAS_MUSTTAIL 1 + #else + #define SK_HAS_MUSTTAIL 0 diff --git a/HACK-third_party-libvpx-use-generic-gnu.patch b/HACK-third_party-libvpx-use-generic-gnu.patch new file mode 100644 index 0000000..c8aaab6 --- /dev/null +++ b/HACK-third_party-libvpx-use-generic-gnu.patch @@ -0,0 +1,48 @@ +Index: chromium-120.0.6099.71/third_party/libvpx/generate_gni.sh +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/libvpx/generate_gni.sh ++++ chromium-120.0.6099.71/third_party/libvpx/generate_gni.sh +@@ -411,7 +411,7 @@ gen_config_files linux/mipsel "--target= + gen_config_files linux/mips64el "--target=mips64-linux-gcc ${all_platforms}" + gen_config_files linux/loongarch \ + "--target=loongarch64-linux-gcc ${all_platforms}" +-gen_config_files linux/ppc64 "--target=ppc64le-linux-gcc ${all_platforms}" ++gen_config_files linux/ppc64 "--target=generic-gnu $HIGHBD ${all_platforms}" + gen_config_files linux/generic "--target=generic-gnu $HIGHBD ${all_platforms}" + gen_config_files win/arm64-highbd \ + "--target=arm64-win64-vs15 ${all_platforms} ${HIGHBD}" +Index: chromium-120.0.6099.71/third_party/libvpx/source/libvpx/build/make/rtcd.pl +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/libvpx/source/libvpx/build/make/rtcd.pl ++++ chromium-120.0.6099.71/third_party/libvpx/source/libvpx/build/make/rtcd.pl +@@ -492,8 +492,9 @@ if ($opts{arch} eq 'x86') { + &require(@REQUIRES); + arm; + } elsif ($opts{arch} =~ /^ppc/ ) { +- @ALL_ARCHS = filter(qw/vsx/); +- ppc; ++ #@ALL_ARCHS = filter(qw/vsx/); ++ #ppc; ++ unoptimized; + } elsif ($opts{arch} =~ /loongarch/ ) { + @ALL_ARCHS = filter(qw/lsx lasx/); + loongarch; +Index: chromium-120.0.6099.71/third_party/libvpx/BUILD.gn +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/libvpx/BUILD.gn ++++ 
chromium-120.0.6099.71/third_party/libvpx/BUILD.gn +@@ -93,6 +93,14 @@ config("libvpx_config") { + "-Wno-sign-compare", + ] + } ++ ++ if (current_cpu == "ppc64") { ++ cflags += [ ++ "-mcpu=power8", ++ "-maltivec", ++ "-mvsx", ++ ] ++ } + } + + # This config is applied to targets that depend on libvpx. diff --git a/Rtc_base-system-arch.h-PPC.patch b/Rtc_base-system-arch.h-PPC.patch new file mode 100644 index 0000000..d00e0b4 --- /dev/null +++ b/Rtc_base-system-arch.h-PPC.patch @@ -0,0 +1,23 @@ +Index: chromium-120.0.6099.71/third_party/webrtc/rtc_base/system/arch.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/webrtc/rtc_base/system/arch.h ++++ chromium-120.0.6099.71/third_party/webrtc/rtc_base/system/arch.h +@@ -46,6 +46,18 @@ + #endif + #if defined(__MIPSEL__) + #define WEBRTC_ARCH_LITTLE_ENDIAN ++#elif defined(__PPC__) ++#define WEBRTC_ARCH_PPC_FAMILY ++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ++#define WEBRTC_ARCH_LITTLE_ENDIAN ++#else ++#define WEBRTC_ARCH_BIG_ENDIAN ++#endif ++#if defined(__LP64__) ++#define WEBRTC_ARCH_64_BITS ++#else ++#define WEBRTC_ARCH_32_BITS ++#endif + #else + #define WEBRTC_ARCH_BIG_ENDIAN + #endif diff --git a/Sandbox-linux-services-credentials.cc-PPC.patch b/Sandbox-linux-services-credentials.cc-PPC.patch new file mode 100644 index 0000000..a93598f --- /dev/null +++ b/Sandbox-linux-services-credentials.cc-PPC.patch @@ -0,0 +1,13 @@ +Index: chromium-120.0.6099.71/sandbox/linux/services/credentials.cc +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/services/credentials.cc ++++ chromium-120.0.6099.71/sandbox/linux/services/credentials.cc +@@ -80,7 +80,7 @@ bool ChrootToSafeEmptyDir() { + pid_t pid = -1; + alignas(16) char stack_buf[PTHREAD_STACK_MIN]; + #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \ +- defined(ARCH_CPU_MIPS_FAMILY) ++ defined(ARCH_CPU_MIPS_FAMILY) || 
defined(ARCH_CPU_PPC64_FAMILY) + // The stack grows downward. + void* stack = stack_buf + sizeof(stack_buf); + #else diff --git a/chromium-122-rust-clang_lib.patch b/chromium-122-rust-clang_lib.patch index 1212438..111b570 100644 --- a/chromium-122-rust-clang_lib.patch +++ b/chromium-122-rust-clang_lib.patch @@ -28,8 +28,8 @@ diff -up chromium-121.0.6167.57/build/config/clang/BUILD.gn.rust-clang_lib chrom + _dir = "armhf-redhat-linux-gnu" + _suffix = "-armhf" + } else if (current_cpu == "ppc64") { -+ _dir = "powerpc64le-redhat-linux-gnu" -+ _suffix = "-powerpc64le" ++ _dir = "ppc64le-redhat-linux-gnu" ++ _suffix = "-ppc64le" + } else { + assert(false) # Unhandled cpu type + } diff --git a/chromium.spec b/chromium.spec index ab53d6c..0dfbb2e 100644 --- a/chromium.spec +++ b/chromium.spec @@ -304,7 +304,7 @@ Name: chromium%{chromium_channel} Version: 122.0.6261.111 -Release: 1%{?dist} +Release: 2%{?dist} Summary: A WebKit (Blink) powered web browser that Google doesn't want you to use Url: http://www.chromium.org/Home License: BSD-3-Clause AND LGPL-2.1-or-later AND Apache-2.0 AND IJG AND MIT AND GPL-2.0-or-later AND ISC AND OpenSSL AND (MPL-1.1 OR GPL-2.0-only OR LGPL-2.0-only) @@ -483,8 +483,87 @@ Patch357: chromium-122-clang16-disable-auto-upgrade-debug-info.patch # set clang_lib path Patch358: chromium-122-rust-clang_lib.patch +# PowerPC64 LE support +# Patches taken from Debian, Timothy Pearson's patchset +# https://salsa.debian.org/chromium-team/chromium/-/tree/master/debian/patches/ppc64le?ref_type=heads +Patch360: 0001-linux-seccomp-bpf-ppc64-glibc-workaround-in-SIGSYS-h.patch +Patch361: 0001-sandbox-Enable-seccomp_bpf-for-ppc64.patch +Patch362: 0001-services-service_manager-sandbox-linux-Fix-TCGETS-de.patch +Patch363: 0001-sandbox-linux-bpf_dsl-Update-syscall-ranges-for-ppc6.patch +Patch364: 0001-sandbox-linux-Implement-partial-support-for-ppc64-sy.patch +Patch365: 0001-sandbox-linux-Update-IsSyscallAllowed-in-broker_proc.patch +Patch366: 
0001-sandbox-linux-Update-syscall-helpers-lists-for-ppc64.patch +Patch367: 0002-sandbox-linux-bpf_dsl-Modify-seccomp_macros-to-add-s.patch +Patch368: 0003-sandbox-linux-system_headers-Update-linux-seccomp-he.patch +Patch369: 0004-sandbox-linux-system_headers-Update-linux-signal-hea.patch +Patch370: 0005-sandbox-linux-seccomp-bpf-Add-ppc64-syscall-stub.patch +Patch371: 0005-sandbox-linux-update-unit-test-for-ppc64.patch +Patch372: 0006-sandbox-linux-disable-timedwait-time64-ppc64.patch +Patch373: 0007-sandbox-linux-add-ppc64-stat.patch +Patch374: Sandbox-linux-services-credentials.cc-PPC.patch +Patch375: 0008-sandbox-fix-ppc64le-glibc234.patch + +Patch376: 0001-third_party-angle-Include-missing-header-cstddef-in-.patch +Patch377: 0001-Add-PPC64-support-for-boringssl.patch +Patch378: 0001-third_party-libvpx-Properly-generate-gni-on-ppc64.patch +Patch379: 0001-third_party-lss-Don-t-look-for-mmap2-on-ppc64.patch +Patch380: 0001-third_party-pffft-Include-altivec.h-on-ppc64-with-SI.patch +Patch381: 0002-third_party-lss-kernel-structs.patch + +Patch382: Rtc_base-system-arch.h-PPC.patch + +Patch383: 0002-Include-cstddef-to-fix-build.patch +Patch384: 0004-third_party-crashpad-port-curl-transport-ppc64.patch + +Patch385: HACK-third_party-libvpx-use-generic-gnu.patch +Patch386: HACK-debian-clang-disable-skia-musttail.patch + +Patch387: 0001-Add-ppc64-target-to-libaom.patch +Patch388: 0001-Add-pregenerated-config-for-libaom-on-ppc64.patch + +Patch389: 0002-third_party-libvpx-Remove-bad-ppc64-config.patch +Patch390: 0002-third-party-boringssl-add-generated-files.patch +Patch391: 0003-third_party-libvpx-Add-ppc64-generated-config.patch +# Enabling VSX causes artifacts to appear in VP9 videos +Patch392: 0003-third_party-libvpx-Add-ppc64-vsx-files.patch +Patch393: 0003-third_party-ffmpeg-Add-ppc64-generated-config.patch +Patch394: 0004-third_party-libvpx-work-around-ambiguous-vsx.patch + +# Enable VSX acceleration in Skia. Requires POWER8 or higher. 
+Patch395: skia-vsx-instructions.patch + +Patch396: 0001-Implement-support-for-ppc64-on-Linux.patch +Patch397: 0001-Implement-support-for-PPC64-on-Linux.patch +Patch398: 0001-Force-baseline-POWER8-AltiVec-VSX-CPU-features-when-.patch +Patch399: fix-rustc.patch +Patch400: fix-rust-linking.patch +Patch401: fix-breakpad-compile.patch +Patch402: fix-partition-alloc-compile.patch +Patch403: 0002-Add-ppc64-trap-instructions.patch +Patch404: 0001-Fix-highway-ppc-hwcap.patch + +Patch405: 0001-Add-PPC64-support-for-libdav1d.patch +Patch406: 0001-Fix-libdav1d-compilation-on-clang-ppc.patch +Patch407: fix-ppc64-linux-syscalls-headers.patch +Patch408: 0003-thirdparty-fix-dav1d-gn.patch +Patch409: use-sysconf-page-size-on-ppc64.patch + +Patch410: dawn-fix-typos.patch +Patch411: dawn-fix-ppc64le-detection.patch + +Patch412: fix-swiftshader-compile.patch + +# Suppress harmless compiler warning messages that appear on ppc64 due to arch-specific warning flags being passed +Patch413: fix-unknown-warning-option-messages.diff + +# Needed on Debian while POWER8 remains the build target +# POWER9 enables hardware 128 bit vector support (ISA 3.0), +# and Highway gets confused when building in POWER8 mode +# (POWER8 compiler flags) on POWER9 hosts. 
+Patch414: 0002-Highway-disable-128-bit-vsx.patch + # upstream patches -Patch400: chromium-122-el8-support-64kpage.patch +Patch500: chromium-122-el8-support-64kpage.patch # Use chromium-latest.py to generate clean tarball from released build tarballs, found here: # http://build.chromium.org/buildbot/official/ @@ -857,12 +936,17 @@ Requires: u2f-hidraw-policy Requires: chromium-common%{_isa} = %{version}-%{release} # rhel 7: x86_64 -# rhel 8+ and fedora 37+: x86_64 aarch64 +# rhel 8 or newer: x86_64, aarch64 +# fedora 38 or newer: x86_64, aarch64, ppc64le %if 0%{?rhel} == 7 ExclusiveArch: x86_64 %else +%if 0%{?fedora} >= 40 +ExclusiveArch: x86_64 aarch64 ppc64le +%else ExclusiveArch: x86_64 aarch64 %endif +%endif # Bundled bits (I'm sure I've missed some) Provides: bundled(angle) = 2422 @@ -1159,9 +1243,79 @@ udev. %patch -P357 -p1 -b .clang16-disable-auto-upgrade-debug-info %patch -P358 -p1 -b .rust-clang_lib +%ifarch ppc64le +%patch -P360 -p1 -b .0001-linux-seccomp-bpf-ppc64-glibc-workaround-in-SIGSYS-h +%patch -P361 -p1 -b .0001-sandbox-Enable-seccomp_bpf-for-ppc64 +%patch -P362 -p1 -b .0001-services-service_manager-sandbox-linux-Fix-TCGETS-de +%patch -P363 -p1 -b .0001-sandbox-linux-bpf_dsl-Update-syscall-ranges-for-ppc6 +%patch -P364 -p1 -b .0001-sandbox-linux-Implement-partial-support-for-ppc64-sy +%patch -P365 -p1 -b .0001-sandbox-linux-Update-IsSyscallAllowed-in-broker_proc +%patch -P366 -p1 -b .0001-sandbox-linux-Update-syscall-helpers-lists-for-ppc64 +%patch -P367 -p1 -b .0002-sandbox-linux-bpf_dsl-Modify-seccomp_macros-to-add-s +%patch -P368 -p1 -b .0003-sandbox-linux-system_headers-Update-linux-seccomp-he +%patch -P369 -p1 -b .0004-sandbox-linux-system_headers-Update-linux-signal-hea +%patch -P370 -p1 -b .0005-sandbox-linux-seccomp-bpf-Add-ppc64-syscall-stub +%patch -P371 -p1 -b .0005-sandbox-linux-update-unit-test-for-ppc64 +%patch -P372 -p1 -b .0006-sandbox-linux-disable-timedwait-time64-ppc64 +%patch -P373 -p1 -b .0007-sandbox-linux-add-ppc64-stat 
+%patch -P374 -p1 -b .Sandbox-linux-services-credentials.cc-PPC +%patch -P375 -p1 -b .0008-sandbox-fix-ppc64le-glibc234 + +%patch -P376 -p1 -b .0001-third_party-angle-Include-missing-header-cstddef-in- +%patch -P377 -p1 -b .0001-Add-PPC64-support-for-boringssl +%patch -P378 -p1 -b .0001-third_party-libvpx-Properly-generate-gni-on-ppc64 +%patch -P379 -p1 -b .0001-third_party-lss-Don-t-look-for-mmap2-on-ppc64 +%patch -P380 -p1 -b .0001-third_party-pffft-Include-altivec.h-on-ppc64-with-SI +%patch -P381 -p1 -b .0002-third_party-lss-kernel-structs + +%patch -P382 -p1 -b .Rtc_base-system-arch.h-PPC + +%patch -P383 -p1 -b .0002-Include-cstddef-to-fix-build +%patch -P384 -p1 -b .0004-third_party-crashpad-port-curl-transport-ppc64 + +%patch -P385 -p1 -b .HACK-third_party-libvpx-use-generic-gnu +%patch -P386 -p1 -b .HACK-debian-clang-disable-skia-musttail + +%patch -P387 -p1 -b .0001-Add-ppc64-target-to-libaom +%patch -P388 -p1 -b .0001-Add-pregenerated-config-for-libaom-on-ppc64 + +%patch -P389 -p1 -b .0002-third_party-libvpx-Remove-bad-ppc64-config +%patch -P390 -p1 -b .0002-third-party-boringssl-add-generated-files +%patch -P391 -p1 -b .0003-third_party-libvpx-Add-ppc64-generated-config +#patch -P392 -p1 -b .0003-third_party-libvpx-Add-ppc64-vsx-files +%patch -P393 -p1 -b .0003-third_party-ffmpeg-Add-ppc64-generated-config +%patch -P394 -p1 -b .0004-third_party-libvpx-work-around-ambiguous-vsx + +%patch -P395 -p1 -b .skia-vsx-instructions + +%patch -P396 -p1 -b .0001-Implement-support-for-ppc64-on-Linux +%patch -P397 -p1 -b .0001-Implement-support-for-PPC64-on-Linux +%patch -P398 -p1 -b .0001-Force-baseline-POWER8-AltiVec-VSX-CPU-features-when- +%patch -P399 -p1 -b .fix-rustc +%patch -P400 -p1 -b .fix-rust-linking +%patch -P401 -p1 -b .fix-breakpad-compile +%patch -P402 -p1 -b .fix-partition-alloc-compile +%patch -P403 -p1 -b .0002-Add-ppc64-trap-instructions +%patch -P404 -p1 -b .0001-Fix-highway-ppc-hwcap + +%patch -P405 -p1 -b .0001-Add-PPC64-support-for-libdav1d 
+%patch -P406 -p1 -b .0001-Fix-libdav1d-compilation-on-clang-ppc +%patch -P407 -p1 -b .fix-ppc64-linux-syscalls-headers +%patch -P408 -p1 -b .0003-thirdparty-fix-dav1d-gn +%patch -P409 -p1 -b .use-sysconf-page-size-on-ppc64 + +%patch -P410 -p1 -b .dawn-fix-typos +%patch -P411 -p1 -b .dawn-fix-ppc64le-detection + +%patch -P412 -p1 -b .fix-swiftshader-compile.patch +%patch -P413 -p1 -b .fix-unknown-warning-option-messages + +%patch -P414 -p1 -b .0002-Highway-disable-128-bit-vsx +%endif + %%ifarch aarch64 %if 0%{?rhel} == 8 -%patch -P400 -p1 -b .el8-support-64kpage.patch +%patch -P500 -p1 -b .el8-support-64kpage.patch %endif %endif @@ -1361,6 +1515,10 @@ CHROMIUM_CORE_GN_DEFINES+=' use_gold=false' CHROMIUM_CORE_GN_DEFINES+=' target_cpu="arm64"' %endif +%ifarch ppc64le +CHROMIUM_CORE_GN_DEFINES+=' target_cpu="ppc64"' +%endif + CHROMIUM_CORE_GN_DEFINES+=' icu_use_data_file=true' CHROMIUM_CORE_GN_DEFINES+=' target_os="linux"' CHROMIUM_CORE_GN_DEFINES+=' current_os="linux"' @@ -1628,7 +1786,7 @@ mkdir -p %{buildroot}%{_mandir}/man1/ pushd %{builddir} cp -a chrom*.pak resources.pak icudtl.dat %{buildroot}%{chromium_path} cp -a locales/*.pak %{buildroot}%{chromium_path}/locales/ - %ifarch x86_64 aarch64 + %ifarch x86_64 aarch64 ppc64le cp -a libvk_swiftshader.so %{buildroot}%{chromium_path} cp -a libvulkan.so.1 %{buildroot}%{chromium_path} cp -a vk_swiftshader_icd.json %{buildroot}%{chromium_path} @@ -1658,12 +1816,12 @@ pushd %{builddir} %if %{build_clear_key_cdm} %ifarch x86_64 cp -a ClearKeyCdm/_platform_specific/linux_x64/libclearkeycdm.so %{buildroot}%{chromium_path} - %else - %ifarch aarch64 - cp -a ClearKeyCdm/_platform_specific/linux_arm64/libclearkeycdm.so %{buildroot}%{chromium_path} - %else - cp -a libclearkeycdm.so %{buildroot}%{chromium_path} - %endif + %endif + %ifarch aarch64 + cp -a ClearKeyCdm/_platform_specific/linux_arm64/libclearkeycdm.so %{buildroot}%{chromium_path} + %endif + %ifarch ppc64le + cp -a 
ClearKeyCdm/_platform_specific/linux_ppc64/libclearkeycdm.so %{buildroot}%{chromium_path} %endif %endif @@ -1843,7 +2001,7 @@ getent group chrome-remote-desktop >/dev/null || groupadd -r chrome-remote-deskt %if %{build_clear_key_cdm} %{chromium_path}/libclearkeycdm.so %endif -%ifarch x86_64 aarch64 +%ifarch x86_64 aarch64 ppc64le %{chromium_path}/libvk_swiftshader.so* %{chromium_path}/libvulkan.so* %{chromium_path}/vk_swiftshader_icd.json @@ -1952,6 +2110,9 @@ getent group chrome-remote-desktop >/dev/null || groupadd -r chrome-remote-deskt %endif %changelog +* Mon Mar 11 2024 Than Ngo - 122.0.6261.111-2 +- enable ppc64le build + * Wed Mar 06 2024 Than Ngo - 122.0.6261.111-1 - upstream security release 122.0.6261.111 * High CVE-2024-2173: Out of bounds memory access in V8 diff --git a/dawn-fix-ppc64le-detection.patch b/dawn-fix-ppc64le-detection.patch new file mode 100644 index 0000000..0fe5149 --- /dev/null +++ b/dawn-fix-ppc64le-detection.patch @@ -0,0 +1,19 @@ +Index: chromium-120.0.6099.71/third_party/dawn/src/dawn/common/Platform.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/dawn/src/dawn/common/Platform.h ++++ chromium-120.0.6099.71/third_party/dawn/src/dawn/common/Platform.h +@@ -158,10 +158,12 @@ + #elif defined(__s390x__) + #define DAWN_PLATFORM_IS_S390X 1 + +-#elif defined(__PPC__) +-#define DAWN_PLATFORM_IS_PPC 1 ++// Order matters here ++// PPC64 also defines PPC, which can lead to detection failures on ppc64le systems + #elif defined(__PPC64__) + #define DAWN_PLATFORM_IS_PPC64 1 ++#elif defined(__PPC__) ++#define DAWN_PLATFORM_IS_PPC 1 + + #else + #error "Unsupported platform." 
diff --git a/dawn-fix-typos.patch b/dawn-fix-typos.patch new file mode 100644 index 0000000..514463e --- /dev/null +++ b/dawn-fix-typos.patch @@ -0,0 +1,32 @@ +Index: chromium-120.0.6099.71/third_party/dawn/src/dawn/common/Platform.h +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/dawn/src/dawn/common/Platform.h ++++ chromium-120.0.6099.71/third_party/dawn/src/dawn/common/Platform.h +@@ -153,9 +153,9 @@ + #define DAWN_PLATFORM_IS_MIPS64 1 + #endif + +-#elif defiend(__s390__) ++#elif defined(__s390__) + #define DAWN_PLATFORM_IS_S390 1 +-#elif defiend(__s390x__) ++#elif defined(__s390x__) + #define DAWN_PLATFORM_IS_S390X 1 + + #elif defined(__PPC__) +Index: chromium-120.0.6099.71/third_party/dawn/src/dawn/common/Assert.cpp +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/dawn/src/dawn/common/Assert.cpp ++++ chromium-120.0.6099.71/third_party/dawn/src/dawn/common/Assert.cpp +@@ -52,9 +52,9 @@ void BreakPoint() { + __asm__ __volatile__("ebreak"); + #elif DAWN_PLATFORM_IS(MIPS) + __asm__ __volatile__("break"); +-#elif DAWN_PLATFORM_IS(S390) || DAWN_PLATFORM_IS_(S390X) ++#elif DAWN_PLATFORM_IS(S390) || DAWN_PLATFORM_IS(S390X) + __asm__ __volatile__(".word 0x0001"); +-#elif DAWN_PLATFORM_IS(PPC) || DAWN_PLATFORM_IS_(PPC64) ++#elif DAWN_PLATFORM_IS(PPC) || DAWN_PLATFORM_IS(PPC64) + __asm__ __volatile__("twge 2,2"); + #else + #error "Unsupported platform" diff --git a/fix-breakpad-compile.patch b/fix-breakpad-compile.patch new file mode 100644 index 0000000..b90bb95 --- /dev/null +++ b/fix-breakpad-compile.patch @@ -0,0 +1,29 @@ +Index: chromium-120.0.6099.71/third_party/breakpad/BUILD.gn +=================================================================== +--- chromium-120.0.6099.71.orig/third_party/breakpad/BUILD.gn ++++ chromium-120.0.6099.71/third_party/breakpad/BUILD.gn +@@ -618,7 +618,6 @@ if (is_linux || is_chromeos || is_androi + 
"breakpad/src/client/minidump_file_writer.h", + "breakpad/src/common/convert_UTF.cc", + "breakpad/src/common/convert_UTF.h", +- "breakpad/src/common/linux/breakpad_getcontext.S", + "breakpad/src/common/linux/elf_core_dump.cc", + "breakpad/src/common/linux/elf_core_dump.h", + "breakpad/src/common/linux/elfutils.cc", +@@ -650,6 +649,8 @@ if (is_linux || is_chromeos || is_androi + configs += [ "//build/config/compiler:no_chromium_code" ] + public_configs = [ ":client_config" ] + ++ defines = [ "HAVE_GETCONTEXT" ] ++ + if (current_cpu == "arm" && is_chromeos_ash) { + # Avoid running out of registers in + # linux_syscall_support.h:sys_clone()'s inline assembly. +@@ -707,7 +708,6 @@ if (is_linux || is_chromeos || is_androi + "breakpad/src/client/linux/minidump_writer/minidump_writer_unittest.cc", + "breakpad/src/client/linux/minidump_writer/minidump_writer_unittest_utils.cc", + "breakpad/src/client/linux/minidump_writer/proc_cpuinfo_reader_unittest.cc", +- "breakpad/src/common/linux/breakpad_getcontext_unittest.cc", + "breakpad/src/common/linux/elf_core_dump_unittest.cc", + "breakpad/src/common/linux/file_id_unittest.cc", + "breakpad/src/common/linux/linux_libc_support_unittest.cc", diff --git a/fix-partition-alloc-compile.patch b/fix-partition-alloc-compile.patch new file mode 100644 index 0000000..46ef037 --- /dev/null +++ b/fix-partition-alloc-compile.patch @@ -0,0 +1,16 @@ +kIndex: chromium-114.0.5735.45/base/allocator/partition_allocator/partition_alloc.gni +=================================================================== +Index: chromium-120.0.6099.71/base/allocator/partition_allocator/partition_alloc.gni +=================================================================== +--- chromium-120.0.6099.71.orig/base/allocator/partition_allocator/partition_alloc.gni ++++ chromium-120.0.6099.71/base/allocator/partition_allocator/partition_alloc.gni +@@ -16,7 +16,8 @@ if (is_nacl) { + # NaCl targets don't use 64-bit pointers. 
+ has_64_bit_pointers = false + } else if (current_cpu == "x64" || current_cpu == "arm64" || +- current_cpu == "loong64" || current_cpu == "riscv64") { ++ current_cpu == "loong64" || current_cpu == "riscv64" || ++ current_cpu == "ppc64") { + has_64_bit_pointers = true + } else if (current_cpu == "x86" || current_cpu == "arm") { + has_64_bit_pointers = false diff --git a/fix-ppc64-linux-syscalls-headers.patch b/fix-ppc64-linux-syscalls-headers.patch new file mode 100644 index 0000000..825918b --- /dev/null +++ b/fix-ppc64-linux-syscalls-headers.patch @@ -0,0 +1,23 @@ +Index: chromium-120.0.6099.71/sandbox/linux/system_headers/ppc64_linux_syscalls.h +=================================================================== +--- chromium-120.0.6099.71.orig/sandbox/linux/system_headers/ppc64_linux_syscalls.h ++++ chromium-120.0.6099.71/sandbox/linux/system_headers/ppc64_linux_syscalls.h +@@ -8,5 +8,18 @@ + #include + + //TODO: is it necessary to redefine syscall numbers for PPC64? ++// Needed for Ubuntu/Debian/Centos/RHEL: ++#if !defined(__NR_shmget) ++#define __NR_shmget 395 ++#endif ++#if !defined(__NR_shmdt) ++#define __NR_shmdt 398 ++#endif ++#if !defined(__NR_shmctl) ++#define __NR_shmctl 396 ++#endif ++#if !defined(__NR_shmat) ++#define __NR_shmat 397 ++#endif + + #endif // SANDBOX_LINUX_SYSTEM_HEADERS_PPC64_LINUX_SYSCALLS_H_ diff --git a/fix-rust-linking.patch b/fix-rust-linking.patch new file mode 100644 index 0000000..a2284e9 --- /dev/null +++ b/fix-rust-linking.patch @@ -0,0 +1,49 @@ +Index: chromium-121.0.6167.75/build/toolchain/gcc_toolchain.gni +=================================================================== +--- chromium-121.0.6167.75.orig/build/toolchain/gcc_toolchain.gni ++++ chromium-121.0.6167.75/build/toolchain/gcc_toolchain.gni +@@ -464,7 +464,13 @@ template("single_gcc_toolchain") { + # -soname flag is not available on aix ld + soname_flag = "-Wl,-soname=\"$soname\"" + } +- link_command = "$ld -shared $soname_flag {{ldflags}}${extra_ldflags} -o 
\"$unstripped_sofile\" @\"$rspfile\" {{rlibs}}" ++ if (target_cpu == "ppc64") { ++ # Work around linker failures due to Rust libraries and the use of whole-archive ++ link_command = "$ld -shared $soname_flag -Wl,--start-group {{ldflags}}${extra_ldflags} -o \"$unstripped_sofile\" @\"$rspfile\" {{rlibs}} -Wl,--end-group" ++ } ++ else { ++ link_command = "$ld -shared $soname_flag {{ldflags}}${extra_ldflags} -o \"$unstripped_sofile\" @\"$rspfile\" {{rlibs}}" ++ } + + # Generate a map file to be used for binary size analysis. + # Map file adds ~10% to the link time on a z620. +@@ -576,7 +582,13 @@ template("single_gcc_toolchain") { + whole_archive_flag = "-Wl,--whole-archive" + no_whole_archive_flag = "-Wl,--no-whole-archive" + } +- command = "$ld -shared {{ldflags}}${extra_ldflags} -o \"$unstripped_sofile\" $soname_flag @\"$rspfile\"" ++ if (target_cpu == "ppc64") { ++ # Work around linker failures due to Rust libraries and the use of whole-archive ++ command = "$ld -shared -Wl,--start-group {{ldflags}}${extra_ldflags} -o \"$unstripped_sofile\" $soname_flag @\"$rspfile\" -Wl,--end-group" ++ } ++ else { ++ command = "$ld -shared {{ldflags}}${extra_ldflags} -o \"$unstripped_sofile\" $soname_flag @\"$rspfile\"" ++ } + + if (defined(invoker.strip)) { + strip_command = "${invoker.strip} -o \"$sofile\" \"$unstripped_sofile\"" +@@ -636,7 +648,13 @@ template("single_gcc_toolchain") { + start_group_flag = "-Wl,--start-group" + end_group_flag = "-Wl,--end-group " + } +- link_command = "$ld {{ldflags}}${extra_ldflags} -o \"$unstripped_outfile\" $start_group_flag @\"$rspfile\" {{solibs}} $end_group_flag {{libs}} {{rlibs}}" ++ if (target_cpu == "ppc64") { ++ # Work around linker failures due to Rust libraries and the use of whole-archive ++ link_command = "$ld -Wl,--start-group {{ldflags}}${extra_ldflags} -o \"$unstripped_outfile\" @\"$rspfile\" {{solibs}} {{libs}} {{rlibs}} -Wl,--end-group" ++ } ++ else { ++ link_command = "$ld {{ldflags}}${extra_ldflags} -o 
\"$unstripped_outfile\" $start_group_flag @\"$rspfile\" {{solibs}} $end_group_flag {{libs}} {{rlibs}}" ++ } + + # Generate a map file to be used for binary size analysis. + # Map file adds ~10% to the link time on a z620. diff --git a/fix-rustc.patch b/fix-rustc.patch new file mode 100644 index 0000000..ced9dcf --- /dev/null +++ b/fix-rustc.patch @@ -0,0 +1,13 @@ +author: Andres Salomon +description: allow ppc64le to build by using proper rustc target +--- a/build/config/rust.gni ++++ b/build/config/rust.gni +@@ -191,6 +191,8 @@ rust_abi_target = "" + if (is_linux || is_chromeos) { + if (current_cpu == "arm64") { + rust_abi_target = "aarch64-unknown-linux-gnu" ++ } else if (current_cpu == "ppc64") { ++ rust_abi_target = "powerpc64le-unknown-linux-gnu" + } else if (current_cpu == "x86") { + rust_abi_target = "i686-unknown-linux-gnu" + } else if (current_cpu == "x64") { diff --git a/fix-swiftshader-compile.patch b/fix-swiftshader-compile.patch new file mode 100644 index 0000000..5411a0d --- /dev/null +++ b/fix-swiftshader-compile.patch @@ -0,0 +1,26 @@ +--- chromium-101.0.4951.54/third_party/swiftshader/third_party/llvm-10.0/BUILD.gn.orig 2022-05-15 10:30:50.887333316 +0200 ++++ chromium-101.0.4951.54/third_party/swiftshader/third_party/llvm-10.0/BUILD.gn 2022-05-15 10:31:43.477318032 +0200 +@@ -133,7 +133,6 @@ swiftshader_llvm_source_set("swiftshader_llvm") { + if (is_ubsan_vptr) { + sources = [ + "llvm/lib/MC/MCWasmObjectTargetWriter.cpp", +- "llvm/lib/MC/MCXCOFFObjectTargetWriter.cpp", + "llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp", + "llvm/lib/Target/TargetIntrinsicInfo.cpp", + ] +@@ -583,6 +582,7 @@ swiftshader_llvm_source_set("swiftshader_llvm_most") { + "llvm/lib/MC/MCAsmInfoCOFF.cpp", + "llvm/lib/MC/MCAsmInfoDarwin.cpp", + "llvm/lib/MC/MCAsmInfoELF.cpp", ++ "llvm/lib/MC/MCAsmInfoXCOFF.cpp", + "llvm/lib/MC/MCAsmMacro.cpp", + "llvm/lib/MC/MCAsmStreamer.cpp", + "llvm/lib/MC/MCAssembler.cpp", +@@ -637,6 +637,7 @@ 
swiftshader_llvm_source_set("swiftshader_llvm_most") { + "llvm/lib/MC/MCWin64EH.cpp", + "llvm/lib/MC/MCWinCOFFStreamer.cpp", + "llvm/lib/MC/MCWinEH.cpp", ++ "llvm/lib/MC/MCXCOFFObjectTargetWriter.cpp", + "llvm/lib/MC/MCXCOFFStreamer.cpp", + "llvm/lib/MC/MachObjectWriter.cpp", + "llvm/lib/MC/StringTableBuilder.cpp", diff --git a/fix-unknown-warning-option-messages.diff b/fix-unknown-warning-option-messages.diff new file mode 100644 index 0000000..fef3a7e --- /dev/null +++ b/fix-unknown-warning-option-messages.diff @@ -0,0 +1,35 @@ +Index: chromium-120.0.6099.71/build/config/compiler/BUILD.gn +=================================================================== +--- chromium-120.0.6099.71.orig/build/config/compiler/BUILD.gn ++++ chromium-120.0.6099.71/build/config/compiler/BUILD.gn +@@ -1780,7 +1780,7 @@ config("default_warnings") { + + # -Wno-class-memaccess warns about hash table and vector in blink. + # But the violation is intentional. +- if (!is_nacl) { ++ if ((!is_nacl) && (current_cpu != "ppc64")) { + cflags_cc += [ "-Wno-class-memaccess" ] + } + +@@ -1790,7 +1790,9 @@ config("default_warnings") { + + # Don't warn about "maybe" uninitialized. Clang doesn't include this + # in -Wall but gcc does, and it gives false positives. +- cflags += [ "-Wno-maybe-uninitialized" ] ++ if (current_cpu != "ppc64") { ++ cflags += [ "-Wno-maybe-uninitialized" ] ++ } + cflags += [ "-Wno-deprecated-declarations" ] + + # -Wcomment gives too many false positives in the case a +@@ -1801,7 +1803,9 @@ config("default_warnings") { + + # -Wpacked-not-aligned complains all generated mojom-shared-internal.h + # files. 
+- cflags += [ "-Wno-packed-not-aligned" ] ++ if (current_cpu != "ppc64") { ++ cflags += [ "-Wno-packed-not-aligned" ] ++ } + } + } + diff --git a/skia-vsx-instructions.patch b/skia-vsx-instructions.patch new file mode 100644 index 0000000..0b2ff11 --- /dev/null +++ b/skia-vsx-instructions.patch @@ -0,0 +1,675 @@ +Index: chromium-122.0.6261.57/third_party/skia/BUILD.gn +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/BUILD.gn ++++ chromium-122.0.6261.57/third_party/skia/BUILD.gn +@@ -198,6 +198,12 @@ opts("skx") { + } + } + ++opts("vsx") { ++ enabled = current_cpu == "ppc64" ++ sources = skia_opts.vsx_sources ++ cflags = [ "-mcpu=power9", "-mtune=power9" ] ++} ++ + # Any feature of Skia that requires third-party code should be optional and use this template. + template("optional") { + if (invoker.enabled) { +@@ -1467,6 +1473,7 @@ skia_component("skia") { + ":skx", + ":typeface_fontations", + ":vello", ++ ":vsx", + ":webp_decode", + ":wuffs", + ":xml", +@@ -1644,7 +1651,10 @@ skia_static_library("pathkit") { + public_configs = [ ":skia_public" ] + configs = skia_library_configs + +- deps = [ ":hsw" ] ++ deps = [ ++ ":hsw", ++ ":vsx", ++ ] + + sources = [] + sources += skia_pathops_sources +Index: chromium-122.0.6261.57/third_party/skia/gn/skia/BUILD.gn +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/gn/skia/BUILD.gn ++++ chromium-122.0.6261.57/third_party/skia/gn/skia/BUILD.gn +@@ -163,6 +163,8 @@ config("default") { + "-mfpmath=sse", + ] + ldflags += [ "-m32" ] ++ } else if (current_cpu == "ppc64") { ++ cflags += [ "-mcpu=power9", "-mtune=power9" ] + } + + if (malloc != "" && !is_win) { +Index: chromium-122.0.6261.57/third_party/skia/include/core/SkTypes.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/include/core/SkTypes.h ++++ 
chromium-122.0.6261.57/third_party/skia/include/core/SkTypes.h +@@ -195,5 +195,44 @@ static constexpr uint32_t SK_InvalidGenI + */ + static constexpr uint32_t SK_InvalidUniqueID = 0; + ++////////////////////////////////////////////////////////////////////// ++// PPC defines ++ ++#if defined(__powerpc64__) || defined(__PPC64__) ++ #ifndef SK_CPU_PPC64 ++ #define SK_CPU_PPC64 ++ #endif ++ #undef SK_CPU_SSE_LEVEL ++#endif ++ ++// Newer versions of clang and gcc for ppc64 ship with wrappers that translate ++// Intel vector intrinsics into PPC VSX instrinsics, so we can pretend to have ++// to be Intel. Currently, full API support for SSSE3 on POWER8 and later ++// processors. ++#if defined(__POWER8_VECTOR__) && defined(__has_include) && \ ++ !defined(SK_CPU_SSE_LEVEL) ++ ++ // Clang ships both Intel and PPC headers in its PPC version, storing the ++ // PPC compatibility in a subdirectory that the compiler will include before ++ // its standard library include directory. ++ #if (__has_include() && !defined(__clang__)) || \ ++ __has_include() ++ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSSE3 ++ #elif (__has_include() && !defined(__clang__)) || \ ++ __has_include() ++ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2 ++ #endif ++ ++ #ifdef SK_CPU_SSE_LEVEL ++ #define SK_PPC64_HAS_SSE_COMPAT ++ #ifndef NO_WARN_X86_INTRINSICS ++ #define NO_WARN_X86_INTRINSICS ++ #endif ++ #if defined(__clang__) ++ #define SK_PPC64_CLANG_MFPPR_BUG ++ #endif ++ #endif ++#endif ++ + + #endif +Index: chromium-122.0.6261.57/third_party/skia/src/base/SkSpinlock.cpp +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/base/SkSpinlock.cpp ++++ chromium-122.0.6261.57/third_party/skia/src/base/SkSpinlock.cpp +@@ -33,7 +33,8 @@ + #endif + + // Renamed from "pause" to avoid conflict with function defined in unistd.h +-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 ++#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 && \ ++ 
!defined(SK_PPC64_CLANG_MFPPR_BUG) + #include + static void do_pause() { _mm_pause(); } + #else +Index: chromium-122.0.6261.57/third_party/skia/src/opts/SkBitmapProcState_opts.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/opts/SkBitmapProcState_opts.h ++++ chromium-122.0.6261.57/third_party/skia/src/opts/SkBitmapProcState_opts.h +@@ -21,7 +21,13 @@ + // The rest are scattershot at the moment but I want to get them + // all migrated to be normal code inside SkBitmapProcState.cpp. + +-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 ++#if defined(SK_PPC64_HAS_SSE_COMPAT) ++ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 ++ #include ++ #else ++ #include ++ #endif ++#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 + #include + #elif defined(SK_ARM_HAS_NEON) + #include +Index: chromium-122.0.6261.57/third_party/skia/src/opts/SkBlitRow_opts.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/opts/SkBlitRow_opts.h ++++ chromium-122.0.6261.57/third_party/skia/src/opts/SkBlitRow_opts.h +@@ -69,7 +69,7 @@ + #endif + + #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 +- #include ++ #include + + static inline __m128i SkPMSrcOver_SSE2(const __m128i& src, const __m128i& dst) { + __m128i scale = _mm_sub_epi32(_mm_set1_epi32(256), +Index: chromium-122.0.6261.57/third_party/skia/src/opts/SkRasterPipeline_opts.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/opts/SkRasterPipeline_opts.h ++++ chromium-122.0.6261.57/third_party/skia/src/opts/SkRasterPipeline_opts.h +@@ -1,5 +1,6 @@ + /* + * Copyright 2018 Google Inc. ++ * Copyright 2023 Raptor Engineering, LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+@@ -74,6 +75,8 @@ using NoCtx = const void*; + #define JUMPER_IS_SCALAR + #elif defined(SK_ARM_HAS_NEON) + #define JUMPER_IS_NEON ++#elif defined(SK_PPC64_HAS_SSE_COMPAT) ++ #define JUMPER_IS_VSX + #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SKX + #define JUMPER_IS_SKX + #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2 +@@ -106,6 +109,8 @@ using NoCtx = const void*; + #include + #elif defined(JUMPER_IS_NEON) + #include ++#elif defined(JUMPER_IS_VSX) ++ #include + #else + #include + #endif +@@ -197,6 +202,182 @@ namespace SK_OPTS_NS { + ptr[3] = a; + } + ++#elif defined(JUMPER_IS_VSX) ++ // Since we know we're using Clang, we can use its vector extensions. ++ template using V = T __attribute__((ext_vector_type(4))); ++ using F = V; ++ using I32 = V< int32_t>; ++ using U64 = V; ++ using U32 = V; ++ using U16 = V; ++ using U8 = V; ++ ++ // We polyfill a few routines that Clang doesn't build into ext_vector_types. ++ SI F min(F a, F b) { return vec_min(a,b); } ++ SI I32 min(I32 a, I32 b) { return vec_min(a,b); } ++ SI U32 min(U32 a, U32 b) { return vec_min(a,b); } ++ SI F max(F a, F b) { return vec_max(a,b); } ++ SI I32 max(I32 a, I32 b) { return vec_max(a,b); } ++ SI U32 max(U32 a, U32 b) { return vec_max(a,b); } ++ ++ SI F abs_ (F v) { return vec_abs(v); } ++ SI I32 abs_ (I32 v) { return vec_abs(v); } ++ SI F rcp_approx(F v) { return vec_re(v); } ++ SI F rcp_precise (F v) { F e = rcp_approx(v); return e * (2.0f - v * e); } ++ SI F rsqrt_approx (F v) { return vec_rsqrte(v); } ++ ++ SI U16 pack(U32 v) { return __builtin_convertvector(v, U16); } ++ SI U8 pack(U16 v) { return __builtin_convertvector(v, U8); } ++ ++ SI F if_then_else(I32 c, F t, F e) { ++ return vec_or((vector float)vec_and((vector float)c, (vector float)t), (vector float)vec_andc((vector float)e, (vector float)c)); ++ } ++ SI I32 if_then_else(I32 c, I32 t, I32 e) { ++ return vec_or((vector unsigned int)vec_and((vector unsigned int)c, (vector unsigned int)t), (vector unsigned int)vec_andc((vector unsigned 
int)e, (vector unsigned int)c)); ++ } ++ ++ // In both AltiVec and SSE there is no horizontal element compare, unlike ARM. Fall back to scalar operations here... ++ SI bool any(I32 c) { ++ if (vec_extract((U32)c, 0) != 0) return 1; ++ if (vec_extract((U32)c, 1) != 0) return 1; ++ if (vec_extract((U32)c, 2) != 0) return 1; ++ if (vec_extract((U32)c, 3) != 0) return 1; ++ return 0; ++ } ++ SI bool all(I32 c) { ++ if (vec_extract((U32)c, 0) == 0) return 0; ++ if (vec_extract((U32)c, 1) == 0) return 0; ++ if (vec_extract((U32)c, 2) == 0) return 0; ++ if (vec_extract((U32)c, 3) == 0) return 0; ++ return 1; ++ } ++ ++ SI F mad(F f, F m, F a) { return vec_madd(f,m,a); } ++ SI F floor_(F v) { return vec_floor(v); } ++ SI F ceil_(F v) { return vec_ceil(v); } ++ SI F sqrt_(F v) { return vec_sqrt(v); } ++ SI U32 round(F v) { return vec_cts((vector float)vec_rint(v), 0); } ++ SI U32 round(F v, F scale) { return vec_cts((vector float)vec_rint(v*scale), 0); } ++ ++ template ++ SI V gather(const T* p, U32 ix) { ++ return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]}; ++ } ++ template ++ SI void scatter_masked(V src, S* dst, U32 ix, I32 mask) { ++ V before = gather(dst, ix); ++ V after = if_then_else(mask, src, before); ++ dst[ix[0]] = after[0]; ++ dst[ix[1]] = after[1]; ++ dst[ix[2]] = after[2]; ++ dst[ix[3]] = after[3]; ++ } ++ ++ // TODO ++ // Finish converting these functions from the SSE translation layer to native AltiVec / VSX ++ SI void load2(const uint16_t* ptr, U16* r, U16* g) { ++ __m128i _01; ++ _01 = _mm_loadu_si128(((__m128i*)ptr) + 0); // r0 g0 r1 g1 r2 g2 r3 g3 ++ auto rg01_23 = _mm_shufflelo_epi16(_01, 0xD8); // r0 r1 g0 g1 r2 g2 r3 g3 ++ auto rg = _mm_shufflehi_epi16(rg01_23, 0xD8); // r0 r1 g0 g1 r2 r3 g2 g3 ++ ++ auto R = _mm_shuffle_epi32(rg, 0x88); // r0 r1 r2 r3 r0 r1 r2 r3 ++ auto G = _mm_shuffle_epi32(rg, 0xDD); // g0 g1 g2 g3 g0 g1 g2 g3 ++ *r = sk_unaligned_load(&R); ++ *g = sk_unaligned_load(&G); ++ } ++ ++ SI void store2(uint16_t* ptr, U16 r, U16 g) { ++ 
U32 rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g)); ++ _mm_storeu_si128((__m128i*)ptr + 0, rg); ++ } ++ ++ SI void load3(const uint16_t* ptr, U16* r, U16* g, U16* b) { ++ __m128i _0, _1, _2, _3; ++ // Load slightly weirdly to make sure we don't load past the end of 4x48 bits. ++ auto _01 = _mm_loadu_si128((const __m128i*)(ptr + 0)) , ++ _23 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 4)), 4); ++ ++ // Each _N holds R,G,B for pixel N in its lower 3 lanes (upper 5 are ignored). ++ _0 = _01; ++ _1 = _mm_srli_si128(_01, 6); ++ _2 = _23; ++ _3 = _mm_srli_si128(_23, 6); ++ ++ // De-interlace to R,G,B. ++ auto _02 = _mm_unpacklo_epi16(_0, _2), // r0 r2 g0 g2 b0 b2 xx xx ++ _13 = _mm_unpacklo_epi16(_1, _3); // r1 r3 g1 g3 b1 b3 xx xx ++ ++ auto R = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3 ++ G = _mm_srli_si128(R, 8), ++ B = _mm_unpackhi_epi16(_02, _13); // b0 b1 b2 b3 xx xx xx xx ++ ++ *r = sk_unaligned_load(&R); ++ *g = sk_unaligned_load(&G); ++ *b = sk_unaligned_load(&B); ++ } ++ ++ SI void load4(const uint16_t* ptr, U16* r, U16* g, U16* b, U16* a) { ++ __m128i _01, _23; ++ _01 = _mm_loadu_si128(((__m128i*)ptr) + 0); // r0 g0 b0 a0 r1 g1 b1 a1 ++ _23 = _mm_loadu_si128(((__m128i*)ptr) + 1); // r2 g2 b2 a2 r3 g3 b3 a3 ++ ++ auto _02 = _mm_unpacklo_epi16(_01, _23), // r0 r2 g0 g2 b0 b2 a0 a2 ++ _13 = _mm_unpackhi_epi16(_01, _23); // r1 r3 g1 g3 b1 b3 a1 a3 ++ ++ auto rg = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3 ++ ba = _mm_unpackhi_epi16(_02, _13); // b0 b1 b2 b3 a0 a1 a2 a3 ++ ++ *r = sk_unaligned_load((uint16_t*)&rg + 0); ++ *g = sk_unaligned_load((uint16_t*)&rg + 4); ++ *b = sk_unaligned_load((uint16_t*)&ba + 0); ++ *a = sk_unaligned_load((uint16_t*)&ba + 4); ++ } ++ ++ SI void store4(uint16_t* ptr, U16 r, U16 g, U16 b, U16 a) { ++ auto rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g)), ++ ba = _mm_unpacklo_epi16(widen_cast<__m128i>(b), widen_cast<__m128i>(a)); ++ ++ 
_mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg, ba)); ++ _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg, ba)); ++ } ++ ++ SI void load2(const float* ptr, F* r, F* g) { ++ F _01, _23; ++ _01 = _mm_loadu_ps(ptr + 0); ++ _23 = _mm_loadu_ps(ptr + 4); ++ *r = _mm_shuffle_ps(_01, _23, 0x88); ++ *g = _mm_shuffle_ps(_01, _23, 0xDD); ++ } ++ ++ SI void store2(float* ptr, F r, F g) { ++ F _01 = _mm_unpacklo_ps(r, g), ++ _23 = _mm_unpackhi_ps(r, g); ++ _mm_storeu_ps(ptr + 0, _01); ++ _mm_storeu_ps(ptr + 4, _23); ++ } ++ ++ SI void load4(const float* ptr, F* r, F* g, F* b, F* a) { ++ F _0, _1, _2, _3; ++ _0 = _mm_loadu_ps(ptr + 0); ++ _1 = _mm_loadu_ps(ptr + 4); ++ _2 = _mm_loadu_ps(ptr + 8); ++ _3 = _mm_loadu_ps(ptr +12); ++ _MM_TRANSPOSE4_PS(_0,_1,_2,_3); ++ *r = _0; ++ *g = _1; ++ *b = _2; ++ *a = _3; ++ } ++ ++ SI void store4(float* ptr, F r, F g, F b, F a) { ++ _MM_TRANSPOSE4_PS(r,g,b,a); ++ _mm_storeu_ps(ptr + 0, r); ++ _mm_storeu_ps(ptr + 4, g); ++ _mm_storeu_ps(ptr + 8, b); ++ _mm_storeu_ps(ptr +12, a); ++ } ++ + #elif defined(JUMPER_IS_NEON) + template using V = Vec<4, T>; + using F = V; +@@ -1004,6 +1185,15 @@ SI F from_half(U16 h) { + #elif defined(JUMPER_IS_HSW) + return _mm256_cvtph_ps((__m128i)h); + ++// Disabled for now as this is not a particularly hot function ++// and there is no good reason to lock Chromium to POWER9+ yet. ++#elif 0 && defined(JUMPER_IS_VSX) && __has_builtin(__builtin_vsx_xvcvhpsp) ++ #if defined(SK_CPU_LENDIAN) ++ return __builtin_vsx_xvcvhpsp({h[0], 0, h[1], 0, h[2], 0, h[3], 0}); ++ #else ++ return __builtin_vsx_xvcvhpsp({0, h[0], 0, h[1], 0, h[2], 0, h[3]}); ++ #endif ++ + #else + // Remember, a half is 1-5-10 (sign-exponent-mantissa) with 15 exponent bias. 
+ U32 sem = expand(h), +@@ -1027,6 +1217,16 @@ SI U16 to_half(F f) { + #elif defined(JUMPER_IS_HSW) + return (U16)_mm256_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION); + ++// Disabled for now as this is not a particularly hot function ++// and there is no good reason to lock Chromium to POWER9+ yet. ++#elif 0 && defined(JUMPER_IS_VSX) && __has_builtin(__builtin_vsx_xvcvsphp) ++ __vector unsigned short v = __builtin_vsx_xvcvsphp(f); ++ #if defined(SK_CPU_LENDIAN) ++ return U16{v[0], v[2], v[4], v[6]}; ++ #else ++ return U16{v[1], v[3], v[5], v[7]}; ++ #endif ++ + #else + // Remember, a float is 1-8-23 (sign-exponent-mantissa) with 127 exponent bias. + U32 sem = sk_bit_cast(f), +@@ -1102,7 +1302,7 @@ static constexpr size_t N = sizeof(F) / + // instead of {b,a} on the stack. Narrow stages work best for __vectorcall. + #define ABI __vectorcall + #define JUMPER_NARROW_STAGES 1 +-#elif defined(__x86_64__) || defined(SK_CPU_ARM64) ++#elif defined(__x86_64__) || defined(SK_CPU_ARM64) || defined(SK_CPU_PPC64) + // These platforms are ideal for wider stages, and their default ABI is ideal. + #define ABI + #define JUMPER_NARROW_STAGES 0 +@@ -4856,6 +5056,10 @@ SI F sqrt_(F x) { + float32x4_t lo,hi; + split(x, &lo,&hi); + return join(sqrt(lo), sqrt(hi)); ++#elif defined(JUMPER_IS_VSX) ++ vector float lo,hi; ++ split(x, &lo,&hi); ++ return join(vec_sqrt(lo), vec_sqrt(hi)); + #else + return F{ + sqrtf(x[0]), sqrtf(x[1]), sqrtf(x[2]), sqrtf(x[3]), +@@ -4879,6 +5083,10 @@ SI F floor_(F x) { + __m128 lo,hi; + split(x, &lo,&hi); + return join(_mm_floor_ps(lo), _mm_floor_ps(hi)); ++#elif defined(JUMPER_IS_VSX) ++ vector float lo,hi; ++ split(x, &lo,&hi); ++ return join(vec_floor(lo), vec_floor(hi)); + #else + F roundtrip = cast(cast(x)); + return roundtrip - if_then_else(roundtrip > x, F_(1), F_(0)); +@@ -4890,6 +5098,7 @@ SI F floor_(F x) { + // (2 * a * b + (1 << 15)) >> 16 + // The result is a number on [-1, 1). + // Note: on neon this is a saturating multiply while the others are not. 
++// Note: for POWER, the code below was borrowed from emmintrin.h + SI I16 scaled_mult(I16 a, I16 b) { + #if defined(JUMPER_IS_SKX) + return (I16)_mm256_mulhrs_epi16((__m256i)a, (__m256i)b); +@@ -4901,6 +5110,22 @@ SI I16 scaled_mult(I16 a, I16 b) { + return vqrdmulhq_s16(a, b); + #elif defined(JUMPER_IS_NEON) + return vqrdmulhq_s16(a, b); ++#elif defined(JUMPER_IS_VSX) ++ const vector unsigned int shift = vec_splats((unsigned int)14); ++ const vector int ones = vec_splats((signed int)1); ++ vector int c = vec_unpackh((vector short)a); ++ vector int d = vec_unpackh((vector short)b); ++ vector int e = vec_unpackl((vector short)b); ++ c = vec_mul(c, d); ++ d = vec_unpackl((vector short)a); ++ d = vec_mul(d, e); ++ c = vec_sr(c, shift); ++ d = vec_sr(d, shift); ++ c = vec_add(c, ones); ++ c = vec_sr(c,(vector unsigned int)ones); ++ d = vec_add(d, ones); ++ d = vec_sr(d,(vector unsigned int)ones); ++ return vec_pack(c, d); + #else + const I32 roundingTerm = I32_(1 << 14); + return cast((cast(a) * cast(b) + roundingTerm) >> 15); +@@ -4922,7 +5147,26 @@ SI U16 constrained_add(I16 a, U16 b) { + SkASSERT(-ib <= ia && ia <= 65535 - ib); + } + #endif ++ ++ // Technically, trying to add a signed and unsigned vector invokes undefined behavior ++ // Just because it sort of seems to work on Intel/ARM on Clang doesn't mean it works everywhere... ++ // FIXME: For added fun, the existing Skia unit tests do NOT properly test for issues in the ++ // lowp bilerp path. Investigate and write an appropriate test case... ++#if defined(JUMPER_IS_VSX) ++ // Most POWER compilers end up doing some kind of width promotion that causes memory corruption ++ // and/or incorrect results. This shows up as snow and general graphics corruption, especially ++ // noticeable when trying to display a PNG at less than 50% size (resize the browser window down ++ // until the artifacts appear). 
++ // Take the (likely invisible) loss of precision, convert b to a signed int immediately, and do ++ a proper saturated add here. This seems to fully resolve the issue for all test cases Raptor ++ has seen so far... ++ // In half precision mode, this function expects both input arguments to have been divided by ++ two prior to being called, and returns the output without being multiplied back up by two ++ return vec_adds(a, (I16)b); ++#else ++ // Hic sunt dracones! + return b + sk_bit_cast(a); ++#endif +@@ -5778,8 +6022,14 @@ STAGE_GP(bilerp_clamp_8888, const SkRast + // 2^-8 * v = 2^-9 * (tx*(R - L) + (R + L)) + // v = 1/2 * (tx*(R - L) + (R + L)) + auto lerpX = [&](U16 left, U16 right) -> U16 { ++#if defined(JUMPER_IS_VSX) ++ // constrained_add() on POWER is run in half precision mode to avoid undefined behavior ++ I16 width = (I16)(right - left) << 6; ++ U16 middle = (right + left) << 6; ++#else + I16 width = (I16)(right - left) << 7; + U16 middle = (right + left) << 7; ++#endif + // The constrained_add is the most subtle part of lerp. The first term is on the interval + // [-1, 1), and the second term is on the interval is on the interval [0, 1) because + // both terms are too high by a factor of 2 which will be handled below. (Both R and L are +@@ -5791,7 +6041,12 @@ STAGE_GP(bilerp_clamp_8888, const SkRast + U16 v2 = constrained_add(scaled_mult(tx, width), middle) + 1; + // Divide by 2 to calculate v and at the same time bring the intermediate value onto the + // interval [0, 1/2] to set up for the lerpY. ++#if defined(JUMPER_IS_VSX) ++ // constrained_add() on POWER is run in half precision mode to avoid undefined behavior ++ return v2; ++#else + return v2 >> 1; ++#endif + }; + + const uint32_t* ptr; +@@ -5825,9 +6080,15 @@ STAGE_GP(bilerp_clamp_8888, const SkRast + I16 width = (I16)bottom - (I16)top; + U16 middle = bottom + top; + // Add + 0x80 for rounding.
++#if defined(JUMPER_IS_VSX) ++ // constrained_add() on POWER is run in half precision mode to avoid undefined behavior ++ U16 blend = constrained_add(scaled_mult(ty, width) / 2, middle / 2) + (0x80 / 2); ++ return blend >> 7; ++#else + U16 blend = constrained_add(scaled_mult(ty, width), middle) + 0x80; +- + return blend >> 8; ++#endif ++ + }; + + r = lerpY(topR, bottomR); +Index: chromium-122.0.6261.57/third_party/skia/src/opts/SkSwizzler_opts.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/opts/SkSwizzler_opts.h ++++ chromium-122.0.6261.57/third_party/skia/src/opts/SkSwizzler_opts.h +@@ -12,7 +12,10 @@ + #include "src/base/SkVx.h" + #include + +-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 ++#if defined(SK_PPC64_HAS_SSE_COMPAT) ++ #include ++ #include ++#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 + #include + #elif defined(SK_ARM_HAS_NEON) + #include +Index: chromium-122.0.6261.57/third_party/skia/src/base/SkVx.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/base/SkVx.h ++++ chromium-122.0.6261.57/third_party/skia/src/base/SkVx.h +@@ -42,7 +42,13 @@ + + #if SKVX_USE_SIMD + #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1 +- #include ++ #if __PPC64__ ++ #include ++ #include ++ #include ++ #else ++ #include ++ #endif + #elif defined(SK_ARM_HAS_NEON) + #include + #elif defined(__wasm_simd128__) +Index: chromium-122.0.6261.57/third_party/skia/src/core/SkBlitMask_opts_ssse3.cpp +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/core/SkBlitMask_opts_ssse3.cpp ++++ chromium-122.0.6261.57/third_party/skia/src/core/SkBlitMask_opts_ssse3.cpp +@@ -9,7 +9,7 @@ + #include "src/core/SkBlitMask.h" + #include "src/core/SkOptsTargets.h" + +-#if defined(SK_CPU_X86) && !defined(SK_ENABLE_OPTIMIZE_SIZE) ++#if (defined(SK_CPU_X86) || defined(SK_CPU_PPC64)) 
&& !defined(SK_ENABLE_OPTIMIZE_SIZE) + + // The order of these includes is important: + // 1) Select the target CPU architecture by defining SK_OPTS_TARGET and including SkOpts_SetTarget +Index: chromium-122.0.6261.57/third_party/skia/src/core/SkSwizzler_opts_ssse3.cpp +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/core/SkSwizzler_opts_ssse3.cpp ++++ chromium-122.0.6261.57/third_party/skia/src/core/SkSwizzler_opts_ssse3.cpp +@@ -11,7 +11,7 @@ + #include "src/core/SkOptsTargets.h" + #include "src/core/SkSwizzlePriv.h" + +-#if defined(SK_CPU_X86) && !defined(SK_ENABLE_OPTIMIZE_SIZE) ++#if (defined(SK_CPU_X86) || defined(SK_CPU_PPC64)) && !defined(SK_ENABLE_OPTIMIZE_SIZE) + + // The order of these includes is important: + // 1) Select the target CPU architecture by defining SK_OPTS_TARGET and including SkOpts_SetTarget +Index: chromium-122.0.6261.57/third_party/skia/src/core/SkBlitMask_opts.cpp +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/core/SkBlitMask_opts.cpp ++++ chromium-122.0.6261.57/third_party/skia/src/core/SkBlitMask_opts.cpp +@@ -25,7 +25,7 @@ namespace SkOpts { + static bool init() { + #if defined(SK_ENABLE_OPTIMIZE_SIZE) + // All Init_foo functions are omitted when optimizing for size +- #elif defined(SK_CPU_X86) ++ #elif defined(SK_CPU_X86) || defined(SK_CPU_PPC64) + #if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_SSSE3 + if (SkCpu::Supports(SkCpu::SSSE3)) { Init_BlitMask_ssse3(); } + #endif +Index: chromium-122.0.6261.57/third_party/skia/src/core/SkBitmapProcState_opts.cpp +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/core/SkBitmapProcState_opts.cpp ++++ chromium-122.0.6261.57/third_party/skia/src/core/SkBitmapProcState_opts.cpp +@@ -26,7 +26,7 @@ namespace SkOpts { + static bool init() { + #if defined(SK_ENABLE_OPTIMIZE_SIZE) + // 
All Init_foo functions are omitted when optimizing for size +- #elif defined(SK_CPU_X86) ++ #elif defined(SK_CPU_X86) || defined(SK_CPU_PPC64) + #if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_SSSE3 + if (SkCpu::Supports(SkCpu::SSSE3)) { Init_BitmapProcState_ssse3(); } + #endif +Index: chromium-122.0.6261.57/third_party/skia/src/core/SkCpu.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/core/SkCpu.h ++++ chromium-122.0.6261.57/third_party/skia/src/core/SkCpu.h +@@ -55,7 +55,7 @@ inline bool SkCpu::Supports(uint32_t mas + + // If we mask in compile-time known lower limits, the compiler can + // often compile away this entire function. +-#if SK_CPU_X86 ++#if SK_CPU_X86 || defined(SK_CPU_PPC64) + #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1 + features |= SSE1; + #endif +Index: chromium-122.0.6261.57/third_party/skia/src/core/SkBitmapProcState_opts_hsw.cpp +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/core/SkBitmapProcState_opts_hsw.cpp ++++ chromium-122.0.6261.57/third_party/skia/src/core/SkBitmapProcState_opts_hsw.cpp +@@ -8,7 +8,7 @@ + #include "include/private/base/SkFeatures.h" + #include "src/core/SkOptsTargets.h" + +-#if defined(SK_CPU_X86) && !defined(SK_ENABLE_OPTIMIZE_SIZE) ++#if (defined(SK_CPU_X86) || defined(SK_CPU_PPC64)) && !defined(SK_ENABLE_OPTIMIZE_SIZE) + + // The order of these includes is important: + // 1) Select the target CPU architecture by defining SK_OPTS_TARGET and including SkOpts_SetTarget +Index: chromium-122.0.6261.57/third_party/skia/src/core/SkBitmapProcState_opts_ssse3.cpp +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/src/core/SkBitmapProcState_opts_ssse3.cpp ++++ chromium-122.0.6261.57/third_party/skia/src/core/SkBitmapProcState_opts_ssse3.cpp +@@ -8,7 +8,7 @@ + #include "include/private/base/SkFeatures.h" + #include 
"src/core/SkOptsTargets.h" + +-#if defined(SK_CPU_X86) && !defined(SK_ENABLE_OPTIMIZE_SIZE) ++#if (defined(SK_CPU_X86) || defined(SK_CPU_PPC64)) && !defined(SK_ENABLE_OPTIMIZE_SIZE) + + // The order of these includes is important: + // 1) Select the target CPU architecture by defining SK_OPTS_TARGET and including SkOpts_SetTarget +Index: chromium-122.0.6261.57/third_party/skia/include/private/base/SkFeatures.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/include/private/base/SkFeatures.h ++++ chromium-122.0.6261.57/third_party/skia/include/private/base/SkFeatures.h +@@ -63,6 +63,8 @@ + + #if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) + #define SK_CPU_X86 1 ++#elif defined(__powerpc64__) || defined(__PPC64__) ++ #define SK_CPU_PPC64 1 + #endif + + /** +Index: chromium-122.0.6261.57/third_party/skia/modules/skcms/src/skcms_internals.h +=================================================================== +--- chromium-122.0.6261.57.orig/third_party/skia/modules/skcms/src/skcms_internals.h ++++ chromium-122.0.6261.57/third_party/skia/modules/skcms/src/skcms_internals.h +@@ -46,6 +46,7 @@ extern "C" { + && !defined(__EMSCRIPTEN__) \ + && !defined(__arm__) \ + && !defined(__riscv) \ ++ && !defined(__powerpc64__) \ + && !defined(_WIN32) && !defined(__SYMBIAN32__) + #define SKCMS_HAS_MUSTTAIL 1 + #endif diff --git a/use-sysconf-page-size-on-ppc64.patch b/use-sysconf-page-size-on-ppc64.patch new file mode 100644 index 0000000..67e9f08 --- /dev/null +++ b/use-sysconf-page-size-on-ppc64.patch @@ -0,0 +1,60 @@ +Index: chromium-120.0.6099.71/base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h +=================================================================== +--- chromium-120.0.6099.71.orig/base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h ++++ 
chromium-120.0.6099.71/base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h +@@ -172,7 +172,11 @@ SystemPageBaseMask() { + return ~SystemPageOffsetMask(); + } + ++#if defined(ARCH_CPU_PPC64) ++constexpr size_t kPageMetadataShift = 6; // 64 bytes per partition page. ++#else + constexpr size_t kPageMetadataShift = 5; // 32 bytes per partition page. ++#endif + constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift; + + } // namespace partition_alloc::internal +Index: chromium-120.0.6099.71/base/allocator/partition_allocator/src/partition_alloc/partition_page.h +=================================================================== +--- chromium-120.0.6099.71.orig/base/allocator/partition_allocator/src/partition_alloc/partition_page.h ++++ chromium-120.0.6099.71/base/allocator/partition_allocator/src/partition_alloc/partition_page.h +@@ -90,7 +90,11 @@ struct SlotSpanMetadata { + + // CHECK()ed in AllocNewSlotSpan(). + // The maximum number of bits needed to cover all currently supported OSes. ++#if defined(ARCH_CPU_PPC64) ++ static constexpr size_t kMaxSlotsPerSlotSpanBits = 15; ++#else + static constexpr size_t kMaxSlotsPerSlotSpanBits = 13; ++#endif + static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), ""); + + // |marked_full| isn't equivalent to being full. Slot span is marked as full +@@ -104,7 +108,11 @@ struct SlotSpanMetadata { + private: + const uint32_t can_store_raw_size_ : 1; + uint32_t freelist_is_sorted_ : 1; ++#if defined(ARCH_CPU_PPC64) ++ uint32_t unused1_ : (64 - 1 - 2 * kMaxSlotsPerSlotSpanBits - 1 - 1); ++#else + uint32_t unused1_ : (32 - 1 - 2 * kMaxSlotsPerSlotSpanBits - 1 - 1); ++#endif + // If |in_empty_cache_|==1, |empty_cache_index| is undefined and mustn't be + // used. 
+ uint16_t in_empty_cache_ : 1; +Index: chromium-120.0.6099.71/base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h +=================================================================== +--- chromium-120.0.6099.71.orig/base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h ++++ chromium-120.0.6099.71/base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h +@@ -21,6 +21,11 @@ static constexpr size_t kMaxSlotsPerSlot + // currently (kMaxSlotsPerSlotSpanBits == 13) not supported by the code, + // so we use the 16 kiB maximum (64 kiB will crash). + static constexpr size_t kMaxSlotsPerSlotSpan = 4 * (1 << 14) / kSmallestBucket; ++#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_PPC64) ++// System page size is not a constant on OpenPOWER systems, but is either 4kiB ++// or 64kiB (1 << 12 or 1 << 16). ++// PartitionPageSize() is 4 times the OS page size. ++static constexpr size_t kMaxSlotsPerSlotSpan = 4 * (1 << 16) / kSmallestBucket; + #else + // A slot span can "span" multiple PartitionPages, but then its slot size is + // larger, so it doesn't have as many slots.