22 files changed, 1706 insertions, 15 deletions
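
The bulk of this change adds Kryo- and Kryo-300-tuned memcpy/memmove/wmemmove implementations under arch-arm64/kryo and arch-arm64/kryo300, wired into Android.bp behind the arm64 kryo/kryo300 CPU-variant clauses (the variant sources replace the generic ones via exclude_srcs; the variant is presumably selected by the board's arm64 cpu_variant setting). When swapping in a hand-written copy routine like this, a small self-contained harness — all names below are hypothetical, not part of this patch — is a quick way to validate it against a byte-wise reference across sizes and misalignments:

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    // Hypothetical harness: checks an optimized copy routine against a trivial
    // byte loop for a range of sizes and src/dst misalignments, and verifies
    // that guard bytes outside the copied region were not disturbed.
    using copy_fn = void* (*)(void*, const void*, size_t);

    static void check_copy(copy_fn fn) {
      unsigned char src[512], dst[512], ref[512];
      for (size_t i = 0; i < sizeof(src); ++i) src[i] = (unsigned char)(i * 131u);
      for (size_t n = 0; n <= 256; ++n) {
        for (size_t sa = 0; sa < 16; ++sa) {
          for (size_t da = 0; da < 16; ++da) {
            memset(dst, 0xAA, sizeof(dst));
            memset(ref, 0xAA, sizeof(ref));
            for (size_t i = 0; i < n; ++i) ref[da + i] = src[sa + i];  // reference copy
            fn(dst + da, src + sa, n);
            assert(memcmp(dst, ref, sizeof(dst)) == 0);  // payload and guard bytes
          }
        }
      }
    }
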
diff --git a/libc/Android.bp b/libc/Android.bp index d3271ae91..46587b774 100644 --- a/libc/Android.bp +++ b/libc/Android.bp @@ -756,6 +756,14 @@ cc_library_static { srcs: [ "arch-arm64/generic/bionic/__memcpy_chk.S", ], + kryo: { + srcs: [ + "arch-arm64/kryo/bionic/__memcpy_chk.S", + ], + exclude_srcs: [ + "arch-arm64/generic/bionic/__memcpy_chk.S", + ], + }, }, }, } @@ -870,6 +878,19 @@ cc_library_static { "bionic/strchr.cpp", "bionic/strnlen.c", ], + kryo: { + srcs: [ + "arch-arm64/kryo/bionic/__memcpy.S", + "arch-arm64/kryo/bionic/memcpy.S", + "arch-arm64/kryo/bionic/memmove.S", + "arch-arm64/kryo/bionic/wmemmove.S", + ], + exclude_srcs: [ + "arch-arm64/generic/bionic/memcpy.S", + "arch-arm64/generic/bionic/memmove.S", + "arch-arm64/generic/bionic/wmemmove.S", + ], + }, }, x86: { @@ -1758,6 +1779,7 @@ cc_library_headers { "//external/scudo:__subpackages__", "//system/core/debuggerd:__subpackages__", "//system/memory/libmemunreachable:__subpackages__", + "//vendor:__subpackages__", ], host_supported: true, vendor_available: true, @@ -1928,6 +1950,8 @@ genrule { cc_defaults { name: "crt_defaults", + // Disable sdclang: TODO(b/142476859) + sdclang: false, defaults: ["linux_bionic_supported"], vendor_available: true, ramdisk_available: true, diff --git a/libc/arch-arm64/kryo/bionic/__memcpy.S b/libc/arch-arm64/kryo/bionic/__memcpy.S new file mode 100644 index 000000000..1cb2bd137 --- /dev/null +++ b/libc/arch-arm64/kryo/bionic/__memcpy.S @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2008 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +// Prototype: void *__memcpy (void *dst, const void *src, size_t count). + +#include <private/bionic_asm.h> + +ENTRY(__memcpy) + #include "__memcpy_base.S" +END(__memcpy) diff --git a/libc/arch-arm64/kryo/bionic/__memcpy_base.S b/libc/arch-arm64/kryo/bionic/__memcpy_base.S new file mode 100644 index 000000000..f85062408 --- /dev/null +++ b/libc/arch-arm64/kryo/bionic/__memcpy_base.S @@ -0,0 +1,217 @@ +/* Copyright (c) 2012-2013, Linaro Limited + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Linaro nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + +/* + * Copyright (c) 2015 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Assumptions: + * + * ARMv8-a, AArch64, unaligned accesses. 
+ * + */ + +#include <private/bionic_asm.h> + +#define dstin x0 +#define src x1 +#define count x2 +#define dst x3 +#define srcend x4 +#define dstend x5 +#define A_l x6 +#define A_lw w6 +#define A_h x7 +#define A_hw w7 +#define B_l x8 +#define B_lw w8 +#define B_h x9 +#define C_l x10 +#define C_h x11 +#define D_l x12 +#define D_h x13 +#define E_l src +#define E_h count +#define F_l srcend +#define F_h dst +#define tmp1 x9 + +#define L(l) .L ## l + +/* Copies are split into 3 main cases: small copies of up to 16 bytes, + medium copies of 17..96 bytes which are fully unrolled. Large copies + of more than 96 bytes align the destination and use an unrolled loop + processing 64 bytes per iteration. + Small and medium copies read all data before writing, allowing any + kind of overlap, and memmove tailcalls memcpy for these cases as + well as non-overlapping copies. +*/ + + prfm PLDL1KEEP, [src] + add srcend, src, count + add dstend, dstin, count + cmp count, 16 + b.ls L(copy16) + cmp count, 96 + b.hi L(copy_long) + + /* Medium copies: 17..96 bytes. */ + sub tmp1, count, 1 + ldp A_l, A_h, [src] + tbnz tmp1, 6, L(copy96) + ldp D_l, D_h, [srcend, -16] + tbz tmp1, 5, 1f + ldp B_l, B_h, [src, 16] + ldp C_l, C_h, [srcend, -32] + stp B_l, B_h, [dstin, 16] + stp C_l, C_h, [dstend, -32] +1: + stp A_l, A_h, [dstin] + stp D_l, D_h, [dstend, -16] + ret + + .p2align 4 + + /* Small copies: 0..16 bytes. */ +L(copy16): + cmp count, 8 + b.lo 1f + ldr A_l, [src] + ldr A_h, [srcend, -8] + str A_l, [dstin] + str A_h, [dstend, -8] + ret + .p2align 4 +1: + tbz count, 2, 1f + ldr A_lw, [src] + ldr A_hw, [srcend, -4] + str A_lw, [dstin] + str A_hw, [dstend, -4] + ret + + /* Copy 0..3 bytes. Use a branchless sequence that copies the same + byte 3 times if count==1, or the 2nd byte twice if count==2. */ +1: + cbz count, 2f + lsr tmp1, count, 1 + ldrb A_lw, [src] + ldrb A_hw, [srcend, -1] + ldrb B_lw, [src, tmp1] + strb A_lw, [dstin] + strb B_lw, [dstin, tmp1] + strb A_hw, [dstend, -1] +2: ret + + .p2align 4 + /* Copy 64..96 bytes. Copy 64 bytes from the start and + 32 bytes from the end. */ +L(copy96): + ldp B_l, B_h, [src, 16] + ldp C_l, C_h, [src, 32] + ldp D_l, D_h, [src, 48] + ldp E_l, E_h, [srcend, -32] + ldp F_l, F_h, [srcend, -16] + stp A_l, A_h, [dstin] + stp B_l, B_h, [dstin, 16] + stp C_l, C_h, [dstin, 32] + stp D_l, D_h, [dstin, 48] + stp E_l, E_h, [dstend, -32] + stp F_l, F_h, [dstend, -16] + ret + + /* Align DST to 16 byte alignment so that we don't cross cache line + boundaries on both loads and stores. There are at least 96 bytes + to copy, so copy 16 bytes unaligned and then align. The loop + copies 64 bytes per iteration and prefetches one iteration ahead. */ + + .p2align 4 +L(copy_long): + and tmp1, dstin, 15 + bic dst, dstin, 15 + ldp D_l, D_h, [src] + sub src, src, tmp1 + add count, count, tmp1 /* Count is now 16 too large. */ + ldp A_l, A_h, [src, 16] + stp D_l, D_h, [dstin] + ldp B_l, B_h, [src, 32] + ldp C_l, C_h, [src, 48] + ldp D_l, D_h, [src, 64]! + subs count, count, 128 + 16 /* Test and readjust count. */ + b.ls 2f +1: + stp A_l, A_h, [dst, 16] + ldp A_l, A_h, [src, 16] + stp B_l, B_h, [dst, 32] + ldp B_l, B_h, [src, 32] + stp C_l, C_h, [dst, 48] + ldp C_l, C_h, [src, 48] + stp D_l, D_h, [dst, 64]! + ldp D_l, D_h, [src, 64]! + subs count, count, 64 + b.hi 1b + + /* Write the last full set of 64 bytes. The remainder is at most 64 + bytes, so it is safe to always copy 64 bytes from the end even if + there is just 1 byte left. 
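
The small-copy path above (L(copy16)) leans on a standard trick: for lengths between 8 and 16, load 8 bytes from each end of the source and store them to each end of the destination, so the two stores overlap instead of branching on the exact length. In portable C++ the same idea looks roughly like this (a sketch, not the patch's code):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Copy n bytes, 8 <= n <= 16, with two possibly-overlapping 8-byte moves --
    // the same branchless head/tail trick the assembly uses in L(copy16).
    // All loads happen before any store, so overlapping src/dst is tolerated,
    // matching the "read all data before writing" note above.
    static void copy_8_to_16(void* dst, const void* src, size_t n) {
      uint64_t head, tail;
      memcpy(&head, src, 8);                                    // first 8 bytes
      memcpy(&tail, static_cast<const char*>(src) + n - 8, 8);  // last 8 bytes
      memcpy(dst, &head, 8);
      memcpy(static_cast<char*>(dst) + n - 8, &tail, 8);        // overlaps if n < 16
    }
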
*/ +2: + ldp E_l, E_h, [srcend, -64] + stp A_l, A_h, [dst, 16] + ldp A_l, A_h, [srcend, -48] + stp B_l, B_h, [dst, 32] + ldp B_l, B_h, [srcend, -32] + stp C_l, C_h, [dst, 48] + ldp C_l, C_h, [srcend, -16] + stp D_l, D_h, [dst, 64] + stp E_l, E_h, [dstend, -64] + stp A_l, A_h, [dstend, -48] + stp B_l, B_h, [dstend, -32] + stp C_l, C_h, [dstend, -16] + ret diff --git a/libc/arch-arm64/kryo/bionic/__memcpy_chk.S b/libc/arch-arm64/kryo/bionic/__memcpy_chk.S new file mode 100644 index 000000000..42177758b --- /dev/null +++ b/libc/arch-arm64/kryo/bionic/__memcpy_chk.S @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include <private/bionic_asm.h> + +ENTRY(__memcpy_chk) + cmp x2, x3 + bls memcpy + + // Preserve for accurate backtrace. + stp x29, x30, [sp, -16]! + .cfi_def_cfa_offset 16 + .cfi_rel_offset x29, 0 + .cfi_rel_offset x30, 8 + + bl __memcpy_chk_fail +END(__memcpy_chk) diff --git a/libc/arch-arm64/kryo/bionic/memcpy.S b/libc/arch-arm64/kryo/bionic/memcpy.S new file mode 100644 index 000000000..fc487d3a4 --- /dev/null +++ b/libc/arch-arm64/kryo/bionic/memcpy.S @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2008 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +// Prototype: void *memcpy (void *dst, const void *src, size_t count). + +#include <private/bionic_asm.h> + +ENTRY(memcpy) + #include "memcpy_base.S" +END(memcpy) diff --git a/libc/arch-arm64/kryo/bionic/memcpy_base.S b/libc/arch-arm64/kryo/bionic/memcpy_base.S new file mode 100644 index 000000000..70f25d0c7 --- /dev/null +++ b/libc/arch-arm64/kryo/bionic/memcpy_base.S @@ -0,0 +1,246 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef PLDOFFS +#undef PLDOFFS +#endif +#define PLDOFFS (16) + +#ifdef PLDTHRESH +#undef PLDTHRESH +#endif +#define PLDTHRESH (PLDOFFS) + +#ifdef BBTHRESH +#undef BBTHRESH +#endif +#define BBTHRESH (2048/128) + +#if (PLDOFFS < 1) +#error Routine does not support offsets less than 1 +#endif +#if (PLDTHRESH < PLDOFFS) +#error PLD threshold must be greater than or equal to the PLD offset +#endif + +#ifdef PLDSIZE +#undef PLDSIZE +#endif +#define PLDSIZE (128) + +kryo_bb_memcpy: + mov x11, x0 + cmp x2, #4 + blo kryo_bb_lt4 + cmp x2, #16 + blo kryo_bb_lt16 + cmp x2, #32 + blo kryo_bb_16 + cmp x2, #64 + blo kryo_bb_copy_32_a + cmp x2, #128 + blo kryo_bb_copy_64_a + + // we have at least 127 bytes to achieve 128-byte alignment + neg x3, x1 // calculate count to get SOURCE aligned + ands x3, x3, #0x7F + b.eq kryo_bb_source_aligned // already aligned + // alignment fixup, small to large (favorable alignment) + tbz x3, #0, 1f + ldrb w5, [x1], #1 + strb w5, [x0], #1 +1: tbz x3, #1, 2f + ldrh w6, [x1], #2 + strh w6, [x0], #2 +2: tbz x3, #2, 3f + ldr w8, [x1], #4 + str w8, [x0], #4 +3: tbz x3, #3, 4f + ldr x9, [x1], #8 + str x9, [x0], #8 +4: tbz x3, #4, 5f + ldr q7, [x1], #16 + str q7, [x0], #16 +5: tbz x3, #5, 55f + ldp q0, q1, [x1], #32 + stp q0, q1, [x0], #32 +55: tbz x3, #6, 6f + ldp q0, q1, [x1], #32 + ldp q2, q3, [x1], #32 + stp q0, q1, [x0], #32 + stp q2, q3, [x0], #32 +6: subs x2, x2, x3 // fixup count after alignment + b.eq kryo_bb_exit + cmp x2, #128 + blo kryo_bb_copy_64_a +kryo_bb_source_aligned: + lsr x12, x2, #7 + cmp x12, #PLDTHRESH + bls kryo_bb_copy_128_loop_nopld + + cmp x12, #BBTHRESH + bls kryo_bb_prime_pump + + add x14, x0, #0x400 + add x9, x1, #(PLDOFFS*PLDSIZE) + sub x14, x14, x9 + lsl x14, x14, #(21+32) + lsr x14, x14, #(21+32) + add x14, x14, #(PLDOFFS*PLDSIZE) + cmp x12, x14, lsr #7 + bls kryo_bb_prime_pump + + mov x9, #(PLDOFFS) + lsr x13, x14, #7 + subs x9, x13, x9 + bls kryo_bb_prime_pump + + add x10, x1, x14 + bic x10, x10, #0x7F // Round to multiple of PLDSIZE + + sub x12, x12, x14, lsr #7 + cmp x9, x12 + sub x13, x12, x9 + csel x12, x13, x12, LS + csel x9, x12, x9, HI + csel x12, xzr, x12, HI + + prfm PLDL1STRM, [x1, #((PLDOFFS-1)*PLDSIZE)] + prfm PLDL1STRM, [x1, #((PLDOFFS-1)*PLDSIZE+64)] +kryo_bb_copy_128_loop_outer_doublepld: + prfm PLDL1STRM, [x1, #((PLDOFFS)*PLDSIZE)] + prfm PLDL1STRM, [x1, #((PLDOFFS)*PLDSIZE)+64] + subs x9, x9, #1 + ldp q0, q1, [x1], #32 + ldp q2, q3, [x1], #32 + ldp q4, q5, [x1], #32 + ldp q6, q7, [x1], #32 + prfm PLDL1KEEP, [x10] + prfm PLDL1KEEP, [x10, #64] + add x10, x10, #128 + stp q0, q1, [x0], #32 + stp q2, q3, [x0], #32 + stp q4, q5, [x0], #32 + stp q6, q7, [x0], #32 + bne kryo_bb_copy_128_loop_outer_doublepld + cmp x12, #0 + beq kryo_bb_pop_before_nopld + cmp x12, #(448*1024/128) + bls kryo_bb_copy_128_loop_outer + +kryo_bb_copy_128_loop_ddr: + subs x12, x12, #1 + ldr x3, [x10], #128 + ldp q0, q1, [x1], #32 + ldp q2, q3, [x1], #32 + ldp q4, q5, [x1], #32 + ldp q6, q7, [x1], #32 + stp q0, q1, [x0], #32 + stp q2, q3, [x0], #32 + stp q4, q5, [x0], #32 + stp q6, q7, [x0], #32 + bne kryo_bb_copy_128_loop_ddr + b kryo_bb_pop_before_nopld + +kryo_bb_prime_pump: + mov x14, #(PLDOFFS*PLDSIZE) + add x10, x1, #(PLDOFFS*PLDSIZE) + bic x10, x10, #0x7F + sub x12, x12, #PLDOFFS + prfum PLDL1KEEP, [x10, #(-1*PLDSIZE)] + prfum PLDL1KEEP, [x10, #(-1*PLDSIZE+64)] + cmp x12, #(448*1024/128) + bhi kryo_bb_copy_128_loop_ddr + +kryo_bb_copy_128_loop_outer: + subs x12, x12, #1 + prfm PLDL1KEEP, [x10] + prfm PLDL1KEEP, [x10, #64] + ldp q0, q1, [x1], #32 + 
ldp q2, q3, [x1], #32 + ldp q4, q5, [x1], #32 + ldp q6, q7, [x1], #32 + add x10, x10, #128 + stp q0, q1, [x0], #32 + stp q2, q3, [x0], #32 + stp q4, q5, [x0], #32 + stp q6, q7, [x0], #32 + bne kryo_bb_copy_128_loop_outer + +kryo_bb_pop_before_nopld: + lsr x12, x14, #7 +kryo_bb_copy_128_loop_nopld: + ldp q0, q1, [x1], #32 + ldp q2, q3, [x1], #32 + ldp q4, q5, [x1], #32 + ldp q6, q7, [x1], #32 + subs x12, x12, #1 + stp q0, q1, [x0], #32 + stp q2, q3, [x0], #32 + stp q4, q5, [x0], #32 + stp q6, q7, [x0], #32 + bne kryo_bb_copy_128_loop_nopld + ands x2, x2, #0x7f + beq kryo_bb_exit + +kryo_bb_copy_64_a: + tbz x2, #6, kryo_bb_copy_32_a + ldp q0, q1, [x1], #32 + ldp q2, q3, [x1], #32 + stp q0, q1, [x0], #32 + stp q2, q3, [x0], #32 +kryo_bb_copy_32_a: + tbz x2, #5, kryo_bb_16 + ldp q0, q1, [x1], #32 + stp q0, q1, [x0], #32 +kryo_bb_16: + tbz x2, #4, kryo_bb_lt16 + ldr q7, [x1], #16 + str q7, [x0], #16 + ands x2, x2, #0x0f + beq kryo_bb_exit +kryo_bb_lt16: + tbz x2, #3, kryo_bb_lt8 + ldr x3, [x1], #8 + str x3, [x0], #8 +kryo_bb_lt8: + tbz x2, #2, kryo_bb_lt4 + ldr w3, [x1], #4 + str w3, [x0], #4 +kryo_bb_lt4: + tbz x2, #1, kryo_bb_lt2 + ldrh w3, [x1], #2 + strh w3, [x0], #2 +kryo_bb_lt2: + tbz x2, #0, kryo_bb_exit + ldrb w3, [x1], #1 + strb w3, [x0], #1 +kryo_bb_exit: + mov x0, x11 + ret + diff --git a/libc/arch-arm64/kryo/bionic/memmove.S b/libc/arch-arm64/kryo/bionic/memmove.S new file mode 100644 index 000000000..e4ceb40df --- /dev/null +++ b/libc/arch-arm64/kryo/bionic/memmove.S @@ -0,0 +1,153 @@ +/* Copyright (c) 2013, Linaro Limited + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Linaro nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + +/* + * Copyright (c) 2015 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Assumptions: + * + * ARMv8-a, AArch64, unaligned accesses, wchar_t is 4 bytes + */ + +#include <private/bionic_asm.h> + +/* Parameters and result. */ +#define dstin x0 +#define src x1 +#define count x2 +#define srcend x3 +#define dstend x4 +#define tmp1 x5 +#define A_l x6 +#define A_h x7 +#define B_l x8 +#define B_h x9 +#define C_l x10 +#define C_h x11 +#define D_l x12 +#define D_h x13 +#define E_l count +#define E_h tmp1 + +/* All memmoves up to 96 bytes are done by __memcpy as it supports overlaps. + Larger backwards copies are also handled by __memcpy. The only remaining + case is forward large copies. The destination is aligned, and an + unrolled loop processes 64 bytes per iteration. +*/ + +#if defined(WMEMMOVE) +ENTRY(wmemmove) + lsl count, count, #2 +#else +ENTRY(memmove) +#endif + sub tmp1, dstin, src + cmp count, 96 + ccmp tmp1, count, 2, hi + b.hs __memcpy + + cbz tmp1, 3f + add dstend, dstin, count + add srcend, src, count + + /* Align dstend to 16 byte alignment so that we don't cross cache line + boundaries on both loads and stores. There are at least 96 bytes + to copy, so copy 16 bytes unaligned and then align. The loop + copies 64 bytes per iteration and prefetches one iteration ahead. */ + + and tmp1, dstend, 15 + ldp D_l, D_h, [srcend, -16] + sub srcend, srcend, tmp1 + sub count, count, tmp1 + ldp A_l, A_h, [srcend, -16] + stp D_l, D_h, [dstend, -16] + ldp B_l, B_h, [srcend, -32] + ldp C_l, C_h, [srcend, -48] + ldp D_l, D_h, [srcend, -64]! + sub dstend, dstend, tmp1 + subs count, count, 128 + b.ls 2f + nop +1: + stp A_l, A_h, [dstend, -16] + ldp A_l, A_h, [srcend, -16] + stp B_l, B_h, [dstend, -32] + ldp B_l, B_h, [srcend, -32] + stp C_l, C_h, [dstend, -48] + ldp C_l, C_h, [srcend, -48] + stp D_l, D_h, [dstend, -64]! + ldp D_l, D_h, [srcend, -64]! + subs count, count, 64 + b.hi 1b + + /* Write the last full set of 64 bytes. The remainder is at most 64 + bytes, so it is safe to always copy 64 bytes from the start even if + there is just 1 byte left. 
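
The memmove entry sequence here (sub/cmp/ccmp/b.hs) packs the overlap test into unsigned arithmetic: if dst - src wraps around or is at least count, a forward copy cannot clobber unread source bytes, so the routine tail-calls __memcpy; small copies go there too because __memcpy's small/medium paths read everything before writing. A C++ rendering of that predicate (sketch only, not bionic's source):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static void* memmove_sketch(void* dst, const void* src, size_t n) {
      uintptr_t diff = reinterpret_cast<uintptr_t>(dst) - reinterpret_cast<uintptr_t>(src);
      if (diff >= n) {
        // No forward-overlap hazard (dst < src also lands here via unsigned
        // wraparound): a plain forward copy is safe.
        return memcpy(dst, src, n);
      }
      // dst starts inside the unread tail of src: copy backwards. (bionic
      // additionally routes n <= 96 to __memcpy, whose small/medium paths
      // read all data first, as the comment above explains.)
      char* d = static_cast<char*>(dst);
      const char* s = static_cast<const char*>(src);
      while (n--) d[n] = s[n];
      return dst;
    }
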
*/ +2: + ldp E_l, E_h, [src, 48] + stp A_l, A_h, [dstend, -16] + ldp A_l, A_h, [src, 32] + stp B_l, B_h, [dstend, -32] + ldp B_l, B_h, [src, 16] + stp C_l, C_h, [dstend, -48] + ldp C_l, C_h, [src] + stp D_l, D_h, [dstend, -64] + stp E_l, E_h, [dstin, 48] + stp A_l, A_h, [dstin, 32] + stp B_l, B_h, [dstin, 16] + stp C_l, C_h, [dstin] +3: ret + +#if defined(WMEMMOVE) +END(wmemmove) +#else +END(memmove) +#endif diff --git a/libc/arch-arm64/kryo/bionic/wmemmove.S b/libc/arch-arm64/kryo/bionic/wmemmove.S new file mode 100644 index 000000000..e4f67f759 --- /dev/null +++ b/libc/arch-arm64/kryo/bionic/wmemmove.S @@ -0,0 +1,30 @@ +/* Copyright (c) 2014, Linaro Limited + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Linaro nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#define WMEMMOVE +#include "memmove.S" +#undef WMEMMOVE diff --git a/libc/arch-arm64/kryo300/bionic/__memcpy.S b/libc/arch-arm64/kryo300/bionic/__memcpy.S new file mode 100644 index 000000000..1cb2bd137 --- /dev/null +++ b/libc/arch-arm64/kryo300/bionic/__memcpy.S @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2008 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +// Prototype: void *__memcpy (void *dst, const void *src, size_t count). + +#include <private/bionic_asm.h> + +ENTRY(__memcpy) + #include "__memcpy_base.S" +END(__memcpy) diff --git a/libc/arch-arm64/kryo300/bionic/__memcpy_base.S b/libc/arch-arm64/kryo300/bionic/__memcpy_base.S new file mode 100644 index 000000000..f85062408 --- /dev/null +++ b/libc/arch-arm64/kryo300/bionic/__memcpy_base.S @@ -0,0 +1,217 @@ +/* Copyright (c) 2012-2013, Linaro Limited + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Linaro nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + +/* + * Copyright (c) 2015 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Assumptions: + * + * ARMv8-a, AArch64, unaligned accesses. + * + */ + +#include <private/bionic_asm.h> + +#define dstin x0 +#define src x1 +#define count x2 +#define dst x3 +#define srcend x4 +#define dstend x5 +#define A_l x6 +#define A_lw w6 +#define A_h x7 +#define A_hw w7 +#define B_l x8 +#define B_lw w8 +#define B_h x9 +#define C_l x10 +#define C_h x11 +#define D_l x12 +#define D_h x13 +#define E_l src +#define E_h count +#define F_l srcend +#define F_h dst +#define tmp1 x9 + +#define L(l) .L ## l + +/* Copies are split into 3 main cases: small copies of up to 16 bytes, + medium copies of 17..96 bytes which are fully unrolled. Large copies + of more than 96 bytes align the destination and use an unrolled loop + processing 64 bytes per iteration. + Small and medium copies read all data before writing, allowing any + kind of overlap, and memmove tailcalls memcpy for these cases as + well as non-overlapping copies. +*/ + + prfm PLDL1KEEP, [src] + add srcend, src, count + add dstend, dstin, count + cmp count, 16 + b.ls L(copy16) + cmp count, 96 + b.hi L(copy_long) + + /* Medium copies: 17..96 bytes. */ + sub tmp1, count, 1 + ldp A_l, A_h, [src] + tbnz tmp1, 6, L(copy96) + ldp D_l, D_h, [srcend, -16] + tbz tmp1, 5, 1f + ldp B_l, B_h, [src, 16] + ldp C_l, C_h, [srcend, -32] + stp B_l, B_h, [dstin, 16] + stp C_l, C_h, [dstend, -32] +1: + stp A_l, A_h, [dstin] + stp D_l, D_h, [dstend, -16] + ret + + .p2align 4 + + /* Small copies: 0..16 bytes. */ +L(copy16): + cmp count, 8 + b.lo 1f + ldr A_l, [src] + ldr A_h, [srcend, -8] + str A_l, [dstin] + str A_h, [dstend, -8] + ret + .p2align 4 +1: + tbz count, 2, 1f + ldr A_lw, [src] + ldr A_hw, [srcend, -4] + str A_lw, [dstin] + str A_hw, [dstend, -4] + ret + + /* Copy 0..3 bytes. Use a branchless sequence that copies the same + byte 3 times if count==1, or the 2nd byte twice if count==2. */ +1: + cbz count, 2f + lsr tmp1, count, 1 + ldrb A_lw, [src] + ldrb A_hw, [srcend, -1] + ldrb B_lw, [src, tmp1] + strb A_lw, [dstin] + strb B_lw, [dstin, tmp1] + strb A_hw, [dstend, -1] +2: ret + + .p2align 4 + /* Copy 64..96 bytes. Copy 64 bytes from the start and + 32 bytes from the end. */ +L(copy96): + ldp B_l, B_h, [src, 16] + ldp C_l, C_h, [src, 32] + ldp D_l, D_h, [src, 48] + ldp E_l, E_h, [srcend, -32] + ldp F_l, F_h, [srcend, -16] + stp A_l, A_h, [dstin] + stp B_l, B_h, [dstin, 16] + stp C_l, C_h, [dstin, 32] + stp D_l, D_h, [dstin, 48] + stp E_l, E_h, [dstend, -32] + stp F_l, F_h, [dstend, -16] + ret + + /* Align DST to 16 byte alignment so that we don't cross cache line + boundaries on both loads and stores. There are at least 96 bytes + to copy, so copy 16 bytes unaligned and then align. The loop + copies 64 bytes per iteration and prefetches one iteration ahead. */ + + .p2align 4 +L(copy_long): + and tmp1, dstin, 15 + bic dst, dstin, 15 + ldp D_l, D_h, [src] + sub src, src, tmp1 + add count, count, tmp1 /* Count is now 16 too large. 
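
L(copy_long) uses a compact alignment idiom: store the first 16 bytes unaligned, round the destination down to a 16-byte boundary, then bias src and count by the same misalignment so the main loop runs fully aligned. The arithmetic, in C++ (illustration of the and/bic/sub/add sequence, with hypothetical names):

    #include <cstddef>
    #include <cstdint>

    struct AlignedCopyArgs { char* dst; const char* src; size_t count; };

    // Given dstin/src/count for a copy of more than 96 bytes, compute the
    // adjusted state the aligned 64-byte loop starts from. The caller has
    // already stored the first 16 bytes unaligned, so biasing count by the
    // misalignment leaves it "16 too large", exactly as the asm comment says.
    static AlignedCopyArgs align_dst16(char* dstin, const char* src, size_t count) {
      uintptr_t mis = reinterpret_cast<uintptr_t>(dstin) & 15;    // and tmp1, dstin, 15
      char* dst = reinterpret_cast<char*>(
          reinterpret_cast<uintptr_t>(dstin) & ~uintptr_t(15));   // bic dst, dstin, 15
      return { dst, src - mis, count + mis };
    }
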
*/ + ldp A_l, A_h, [src, 16] + stp D_l, D_h, [dstin] + ldp B_l, B_h, [src, 32] + ldp C_l, C_h, [src, 48] + ldp D_l, D_h, [src, 64]! + subs count, count, 128 + 16 /* Test and readjust count. */ + b.ls 2f +1: + stp A_l, A_h, [dst, 16] + ldp A_l, A_h, [src, 16] + stp B_l, B_h, [dst, 32] + ldp B_l, B_h, [src, 32] + stp C_l, C_h, [dst, 48] + ldp C_l, C_h, [src, 48] + stp D_l, D_h, [dst, 64]! + ldp D_l, D_h, [src, 64]! + subs count, count, 64 + b.hi 1b + + /* Write the last full set of 64 bytes. The remainder is at most 64 + bytes, so it is safe to always copy 64 bytes from the end even if + there is just 1 byte left. */ +2: + ldp E_l, E_h, [srcend, -64] + stp A_l, A_h, [dst, 16] + ldp A_l, A_h, [srcend, -48] + stp B_l, B_h, [dst, 32] + ldp B_l, B_h, [srcend, -32] + stp C_l, C_h, [dst, 48] + ldp C_l, C_h, [srcend, -16] + stp D_l, D_h, [dst, 64] + stp E_l, E_h, [dstend, -64] + stp A_l, A_h, [dstend, -48] + stp B_l, B_h, [dstend, -32] + stp C_l, C_h, [dstend, -16] + ret diff --git a/libc/arch-arm64/kryo300/bionic/__memcpy_chk.S b/libc/arch-arm64/kryo300/bionic/__memcpy_chk.S new file mode 100644 index 000000000..42177758b --- /dev/null +++ b/libc/arch-arm64/kryo300/bionic/__memcpy_chk.S @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include <private/bionic_asm.h> + +ENTRY(__memcpy_chk) + cmp x2, x3 + bls memcpy + + // Preserve for accurate backtrace. + stp x29, x30, [sp, -16]! + .cfi_def_cfa_offset 16 + .cfi_rel_offset x29, 0 + .cfi_rel_offset x30, 8 + + bl __memcpy_chk_fail +END(__memcpy_chk) diff --git a/libc/arch-arm64/kryo300/bionic/memcpy.S b/libc/arch-arm64/kryo300/bionic/memcpy.S new file mode 100644 index 000000000..fc487d3a4 --- /dev/null +++ b/libc/arch-arm64/kryo300/bionic/memcpy.S @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2008 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +// Prototype: void *memcpy (void *dst, const void *src, size_t count). + +#include <private/bionic_asm.h> + +ENTRY(memcpy) + #include "memcpy_base.S" +END(memcpy) diff --git a/libc/arch-arm64/kryo300/bionic/memcpy_base.S b/libc/arch-arm64/kryo300/bionic/memcpy_base.S new file mode 100644 index 000000000..4312bc1c1 --- /dev/null +++ b/libc/arch-arm64/kryo300/bionic/memcpy_base.S @@ -0,0 +1,308 @@ +/* Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of The Linux Foundation nor the names of its contributors may + * be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
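
Both variant directories also carry the four-argument FORTIFY entry point __memcpy_chk shown above: if the copy length exceeds the destination size the compiler could prove at the call site, it diverts to __memcpy_chk_fail; otherwise it falls straight through to memcpy. The contract amounts to this (a C++ sketch under that reading, not bionic's actual source):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    // Sketch of the contract the __memcpy_chk assembly implements; dst_len is
    // the provable destination object size supplied by FORTIFY call sites.
    extern "C" void* __memcpy_chk_sketch(void* dst, const void* src,
                                         size_t count, size_t dst_len) {
      if (count > dst_len) {
        abort();  // bionic branches to __memcpy_chk_fail, which logs and aborts
      }
      return memcpy(dst, src, count);
    }
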
+ */ + +#define A53_OPT + +#define TEST (0x200) + +#ifdef IL_512 +#define IL_DIST (0x200) +#define PRFM_SUB (64*1) +#define PRFM_HI_DIST (0x10*2) +#else +#define IL_DIST (0x400) +#define PRFM_SUB (64*2) +#define PRFM_HI_DIST (0x14*2) +#endif + +#define PRFM_COPY + +//Configurable parameters +#define PLD_COPY_SIZE (0x400 * 0x100 * 1) + + PRFM PLDL1KEEP, [X1] + CMP X2, (320*2) + B.HI copy_long + CMP X2, 16 + B.LS copy16 + PRFM PSTL1KEEP, [X0] + + LDP X6, X7, [X1] + ADD X4, X1, X2 + LDP X12, X13, [X4, -16] + SUBS X2, X2, 32 + ADD X3, X0, X2 + BGT small_copy + STP X6, X7, [X0] + STP X12, X13, [X3, 16] + RET + + .p2align 4 +small_copy: + SUBS X2, X2, #32 + BGT 2f + LDP X10, X11, [X4, -32] + LDP X8, X9, [X1, 16] + STP X6, X7, [X0] + STP X8, X9, [X0, 16] + STP X10, X11, [X3] + STP X12, X13, [X3, 16] + RET +2: + BIC X5, X1, #0xF + LDP X8, X9, [X5, 16] + LDP X10, X11, [X4, -32] + PRFM PSTL1KEEP, [X0, #80] + STP X6, X7, [X0] + LDP X6, X7, [X5, 32]! + AND X14, X1, #0xF + SUB X15, X0, X14 + ADD X2, X2, X14 + SUBS X2, X2, #0x10 + BLE 2f + PRFM PLDL1KEEP, [X5, #48] + PRFM PSTL1KEEP, [X3] +1: + STP X8, X9, [X15, 16] + LDP X8, X9, [X5, 16] + STP X6, X7, [X15, 32]! + LDP X6, X7, [X5, 32]! + SUBS X2, X2, 32 + BGT 1b +2: + STP X8, X9, [X15, 16] + STP X6, X7, [X15, 32] + STP X10, X11, [X3] + STP X12, X13, [X3, 16] + RET + + .p2align 6 + /* Small copies: 0..16 bytes. */ +copy16: + CBZ X2, 2f + PRFM PSTL1KEEP, [X0] + ADD X3, X0, X2 + ADD X4, X1, X2 + CMP X2, 8 + B.LO 1f + LDR X6, [X1] + LDR X7, [X4, -8] + STR X6, [X0] + STR X7, [X3, -8] + RET +1: + TBZ X2, 2, 1f + LDR W6, [X1] + LDR W7, [X4, -4] + STR W6, [X0] + STR W7, [X3, -4] + RET + /* Copy 0..3 bytes. Use a branchless sequence that copies the same + byte 3 times if count==1, or the 2nd byte twice if count==2. */ +1: + LSR X9, X2, 1 + LDRB W6, [X1] + LDRB W7, [X4, -1] + LDRB W8, [X1, X9] + STRB W6, [X0] + STRB W8, [X0, x9] + STRB W7, [X3, -1] +2: RET + + .p2align 6 +copy_long: +#ifdef PRFM_COPY + CMP X2, #PLD_COPY_SIZE + BGE prfm_cpy +#endif + LDP X12, X13, [X1] + PRFM PLDL1KEEP, [X1, #64] + BIC X5, X1, #0xF + AND X14, X1, #0xF + SUB X15, X0, X14 + LDP X6, X7, [X5, 16] + LDP X8, X9, [X5, 32] + PRFM PLDL1KEEP, [X5, #144] + STP X12, X13, [X0] + LDP X10, X11, [X5, 48] + LDP X12, X13, [X5, 64]! + ADD X2, X2, X14 + SUB X2, X2, #144 + PRFM PLDL1KEEP, [X5, #144] + ADD X4, X5, X2 + ADD X3, X15, X2 +1: + STP X6, X7, [X15, 16] + LDP X6, X7, [X5, 16] + STP X8, X9, [X15, 32] + LDP X8, X9, [X5, 32] + STP X10, X11, [X15, 48] + LDP X10, X11, [X5, 48] + STP X12, X13, [X15, 64]! + LDP X12, X13, [X5, 64]! + SUBS X2, X2, 64 + BGT 1b + LDP X1, X14, [X4, 16] + STP X6, X7, [X15, 16] + LDP X6, X7, [X4, 32] + STP X8, X9, [X15, 32] + LDP X8, X9, [X4, 48] + STP X10, X11, [X15, 48] + LDP X10, X11, [X4, 64] + STP X12, X13, [X15, 64] + STP X1, X14, [X3, 80] + STP X6, X7, [X3, 96] + STP X8, X9, [X3, 112] + STP X10, X11, [X3, 128] + RET + + .p2align 6 +prfm_cpy: + NEG X4, X1 + ANDS X4, X4, #0x3F + ADD X15, X0, X4 + PRFM PLDL1KEEP, [X1, 64] + BEQ dst_64_bytealigned + SUB X6, X1, #0x10 + LDP X7, X8, [X6, #0x10]! + ADD X1, X1, X4 + SUB X2, X2, X4 + SUB X5, X0, #0x10 + SUBS X4, X4, #0x10 + BLE 2f +1: + STP X7, X8, [X5, #0x10]! + LDP X7, X8, [X6, #0x10]! 
+ SUBS X4, X4, #0x10 + BGT 1b +2: + STP X7, X8, [X5, #0x10] +dst_64_bytealigned: + MOV X4, #(IL_DIST) + SUB X3, X4, #1 + AND X6, X15, X3 + AND X4, X1, X3 + PRFM PLDL1KEEP, [x1, 128] + SUBS X6, X4, X6 + SUB X7, XZR, X6 + CSEL X7, X7, X6, LT + PRFM PLDL1KEEP, [x1, 192] + MOV X4, #(IL_DIST) + EOR X8, X15, X1 + ANDS X8, X8, X4 + CSEL X11, X4, XZR, EQ + PRFM PLDL1KEEP, [x1, 256] + LSR X5, X4, 1 + SUB X9, XZR, X9 + CSEL X9, XZR, X9, EQ + PRFM PLDL1KEEP, [x1, 320] + CMP X6, X9 + BLT 1f + ADDS X8, X8, XZR + CSEL X9, X7, X6, EQ + SUB X7, XZR, X9 + ADD X11, X4, X11 + BNE 1f + ADD X11, X11, X4 + CMP X6, X5 + CSEL X11, X4, X11, LT +1: + ADD X6, X11, X7 + LDP X7, X8, [X1] + LDP X9, X10, [X1, #16] + PRFM PLDL1KEEP, [x1, 384] + + ADD X6, X6, #(PRFM_HI_DIST << 6) + BIC X6, X6, #0x3F + ADD X3, X1, X6 + SUB X3, X3, #(PRFM_SUB) + PRFM PLDL1KEEP, [x1, 448] + SUB X4, X3, X1 + SUB X4, X4, #(TEST) + SUB X5, X2, X4 + SUB X5, X5, X6 + PRFM PLDL1KEEP, [x1, 512] + LDP X11, X12, [X1, #32] + LDP X13, X14, [X1, #48]! + SUB X15, X15, #16 + SUB X4, X4, #0x40 * 2 + +double_pld: + PRFM PLDL1KEEP, [X1, #(TEST + 16)] + STP X7, X8, [X15, #16] + LDP X7, X8, [X1, #16] + STP X9, X10, [X15, #32] + LDP X9, X10, [X1, #32] + PRFM PLDL3KEEP, [X3] + ADD X3, X3, #64 + STP X11, X12, [X15, #48] + LDP X11, X12, [X1, #48] + STP X13, X14, [X15, #64]! + LDP X13, X14, [X1, #64]! + SUBS X4, X4, #0x40 + BGT double_pld +single_pld: +prfm_copy_loop: + PRFM PLDL3KEEP, [X3] + ADD X3, X3, #64 + STP X7, X8, [X15, #16] + LDP X7, X8, [X1, #16] + STP X9, X10, [X15, #32] + LDP X9, X10, [X1, #32] + STP X11, X12, [X15, #48] + LDP X11, X12, [X1, #48] + STP X13, X14, [X15, #64]! + LDP X13, X14, [X1, #64]! + SUBS X5, X5, #0x40 + BGT prfm_copy_loop +prfm_done: + PRFM PLDL3KEEP, [X3] +plded_copy_loop: + STP X7, X8, [X15, #16] + LDP X7, X8, [X1, #16] + STP X9, X10, [X15, #32] + LDP X9, X10, [X1, #32] + STP X11, X12, [X15, #48] + LDP X11, X12, [X1, #48] + STP X13, X14, [X15, #64]! + LDP X13, X14, [X1, #64]! + SUBS X6, X6, #0x40 + BGT plded_copy_loop + ADD X4, X1, X5 + STP X7, X8, [X15, #16] + LDP X1, X2, [X4, #16] + STP X9, X10, [X15, 32] + LDP X7, X8, [X4, 32] + STP X11, X12, [X15, 48] + LDP X9, X10, [X4, 48] + STP X13, X14, [X15, 64] + LDP X11, X12, [X4, 64] + ADD X3, X15, X5 + STP X1, X2, [X3, 80] + STP X7, X8, [X3, 96] + STP X9, X10, [X3, 112] + STP X11, X12, [X3, 128] + RET diff --git a/libc/arch-arm64/kryo300/bionic/memmove.S b/libc/arch-arm64/kryo300/bionic/memmove.S new file mode 100644 index 000000000..e4ceb40df --- /dev/null +++ b/libc/arch-arm64/kryo300/bionic/memmove.S @@ -0,0 +1,153 @@ +/* Copyright (c) 2013, Linaro Limited + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Linaro nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + +/* + * Copyright (c) 2015 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Assumptions: + * + * ARMv8-a, AArch64, unaligned accesses, wchar_t is 4 bytes + */ + +#include <private/bionic_asm.h> + +/* Parameters and result. */ +#define dstin x0 +#define src x1 +#define count x2 +#define srcend x3 +#define dstend x4 +#define tmp1 x5 +#define A_l x6 +#define A_h x7 +#define B_l x8 +#define B_h x9 +#define C_l x10 +#define C_h x11 +#define D_l x12 +#define D_h x13 +#define E_l count +#define E_h tmp1 + +/* All memmoves up to 96 bytes are done by __memcpy as it supports overlaps. + Larger backwards copies are also handled by __memcpy. The only remaining + case is forward large copies. The destination is aligned, and an + unrolled loop processes 64 bytes per iteration. +*/ + +#if defined(WMEMMOVE) +ENTRY(wmemmove) + lsl count, count, #2 +#else +ENTRY(memmove) +#endif + sub tmp1, dstin, src + cmp count, 96 + ccmp tmp1, count, 2, hi + b.hs __memcpy + + cbz tmp1, 3f + add dstend, dstin, count + add srcend, src, count + + /* Align dstend to 16 byte alignment so that we don't cross cache line + boundaries on both loads and stores. There are at least 96 bytes + to copy, so copy 16 bytes unaligned and then align. The loop + copies 64 bytes per iteration and prefetches one iteration ahead. 
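
The "prefetches one iteration ahead" strategy these loops share has a direct portable analogue: before doing this iteration's work, issue a prefetch for the data one loop stride in front of the current load. A sketch using GCC/Clang's __builtin_prefetch (the real distances are tuned per core — compare the kryo PLDOFFS/PLDSIZE knobs earlier in this diff):

    #include <cstddef>
    #include <cstring>

    // Copy in 64-byte strides, prefetching one stride ahead -- the portable
    // shape of the PLDL1KEEP scheme in these loops (illustrative only).
    static void copy_with_prefetch(char* dst, const char* src, size_t n) {
      size_t i = 0;
      for (; i + 64 <= n; i += 64) {
        __builtin_prefetch(src + i + 64);  // read prefetch: next iteration's data
        memcpy(dst + i, src + i, 64);      // compilers lower this to wide loads/stores
      }
      memcpy(dst + i, src + i, n - i);     // tail
    }
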
*/ + + and tmp1, dstend, 15 + ldp D_l, D_h, [srcend, -16] + sub srcend, srcend, tmp1 + sub count, count, tmp1 + ldp A_l, A_h, [srcend, -16] + stp D_l, D_h, [dstend, -16] + ldp B_l, B_h, [srcend, -32] + ldp C_l, C_h, [srcend, -48] + ldp D_l, D_h, [srcend, -64]! + sub dstend, dstend, tmp1 + subs count, count, 128 + b.ls 2f + nop +1: + stp A_l, A_h, [dstend, -16] + ldp A_l, A_h, [srcend, -16] + stp B_l, B_h, [dstend, -32] + ldp B_l, B_h, [srcend, -32] + stp C_l, C_h, [dstend, -48] + ldp C_l, C_h, [srcend, -48] + stp D_l, D_h, [dstend, -64]! + ldp D_l, D_h, [srcend, -64]! + subs count, count, 64 + b.hi 1b + + /* Write the last full set of 64 bytes. The remainder is at most 64 + bytes, so it is safe to always copy 64 bytes from the start even if + there is just 1 byte left. */ +2: + ldp E_l, E_h, [src, 48] + stp A_l, A_h, [dstend, -16] + ldp A_l, A_h, [src, 32] + stp B_l, B_h, [dstend, -32] + ldp B_l, B_h, [src, 16] + stp C_l, C_h, [dstend, -48] + ldp C_l, C_h, [src] + stp D_l, D_h, [dstend, -64] + stp E_l, E_h, [dstin, 48] + stp A_l, A_h, [dstin, 32] + stp B_l, B_h, [dstin, 16] + stp C_l, C_h, [dstin] +3: ret + +#if defined(WMEMMOVE) +END(wmemmove) +#else +END(memmove) +#endif diff --git a/libc/arch-arm64/kryo300/bionic/wmemmove.S b/libc/arch-arm64/kryo300/bionic/wmemmove.S new file mode 100644 index 000000000..e4f67f759 --- /dev/null +++ b/libc/arch-arm64/kryo300/bionic/wmemmove.S @@ -0,0 +1,30 @@ +/* Copyright (c) 2014, Linaro Limited + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Linaro nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#define WMEMMOVE +#include "memmove.S" +#undef WMEMMOVE diff --git a/libc/bionic/gwp_asan_wrappers.cpp b/libc/bionic/gwp_asan_wrappers.cpp index d3e6a14ce..201c7ef57 100644 --- a/libc/bionic/gwp_asan_wrappers.cpp +++ b/libc/bionic/gwp_asan_wrappers.cpp @@ -202,12 +202,15 @@ static const MallocDispatch gwp_asan_dispatch __attribute__((unused)) = { // The probability (1 / kProcessSampleRate) that a process will be ranodmly // selected for sampling. kProcessSampleRate should always be a power of two to // avoid modulo bias. 
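
Why a power of two: a uint8_t takes 256 equally likely values, and 256 divides evenly only by power-of-two rates, so random % rate == 0 stays exactly 1-in-rate and can be computed with a mask. The hunk that follows then comments the rate out and returns false unconditionally, disabling GWP-ASan process sampling altogether. A minimal sketch of the unbiased check:

    #include <cstdint>

    // 1-in-128 sampling from one random byte. 256 % 128 == 0, so every residue
    // class is equally likely; a non-power-of-two rate (say 100) would make
    // residues 0..55 slightly more probable than 56..99 (modulo bias).
    static bool should_sample(uint8_t random_byte) {
      return (random_byte & 127) == 0;  // same as random_byte % 128 == 0
    }
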
-static constexpr uint8_t kProcessSampleRate = 128; +//static constexpr uint8_t kProcessSampleRate = 128; bool ShouldGwpAsanSampleProcess() { - uint8_t random_number; - __libc_safe_arc4random_buf(&random_number, sizeof(random_number)); - return random_number % kProcessSampleRate == 0; + //Returning false from this function will skip the sampling process, thus + //effectively disable gwp-asan. + /*uint8_t random_number; + __libc_safe_arc4random_buf(&random_number, sizeof(random_number)); + return random_number % kProcessSampleRate == 0;*/ + return false; } bool MaybeInitGwpAsanFromLibc(libc_globals* globals) { diff --git a/libc/dns/resolv/res_cache.c b/libc/dns/resolv/res_cache.c index f4c590f60..c1c2059ef 100644 --- a/libc/dns/resolv/res_cache.c +++ b/libc/dns/resolv/res_cache.c @@ -28,6 +28,7 @@ #include "resolv_cache.h" +#include <ctype.h> #include <resolv.h> #include <stdarg.h> #include <stdio.h> @@ -453,6 +454,22 @@ typedef struct { const uint8_t* cursor; } DnsPacket; +static int +memcasecmp( const unsigned char *s1, const unsigned char *s2, int len ) +{ + for ( int i = 0; i < len; i++ ) + { + int ch1 = *s1++; + int ch2 = *s2++; + int d = tolower(ch1) - tolower(ch2); + if (d != 0) + { + return d; + } + } + return 0; +} + static void _dnsPacket_init( DnsPacket* packet, const uint8_t* buff, int bufflen ) { @@ -765,6 +782,7 @@ _dnsPacket_hashBytes( DnsPacket* packet, int numBytes, unsigned hash ) while (numBytes > 0 && p < end) { hash = hash*FNV_MULT ^ *p++; + numBytes -= 1; } packet->cursor = p; return hash; @@ -778,14 +796,12 @@ _dnsPacket_hashQName( DnsPacket* packet, unsigned hash ) const uint8_t* end = packet->end; for (;;) { - int c; - if (p >= end) { /* should not happen */ XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__); break; } - c = *p++; + int c = *p++; if (c == 0) break; @@ -799,9 +815,12 @@ _dnsPacket_hashQName( DnsPacket* packet, unsigned hash ) __FUNCTION__); break; } + while (c > 0) { - hash = hash*FNV_MULT ^ *p++; - c -= 1; + int ch = *p++; + ch = tolower(ch); + hash = hash * (FNV_MULT ^ ch); + c--; } } packet->cursor = p; @@ -888,14 +907,12 @@ _dnsPacket_isEqualDomainName( DnsPacket* pack1, DnsPacket* pack2 ) const uint8_t* end2 = pack2->end; for (;;) { - int c1, c2; - if (p1 >= end1 || p2 >= end2) { XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__); break; } - c1 = *p1++; - c2 = *p2++; + int c1 = *p1++; + int c2 = *p2++; if (c1 != c2) break; @@ -913,7 +930,7 @@ _dnsPacket_isEqualDomainName( DnsPacket* pack1, DnsPacket* pack2 ) __FUNCTION__); break; } - if (memcmp(p1, p2, c1) != 0) + if (memcasecmp(p1, p2, c1) != 0) break; p1 += c1; p2 += c1; diff --git a/libc/include/arpa/inet.h b/libc/include/arpa/inet.h index db054c9e1..7716b9445 100644 --- a/libc/include/arpa/inet.h +++ b/libc/include/arpa/inet.h @@ -33,6 +33,7 @@ #include <stdint.h> #include <sys/cdefs.h> #include <sys/types.h> +#include <inaddr.h> __BEGIN_DECLS diff --git a/libc/include/inaddr.h b/libc/include/inaddr.h new file mode 100644 index 000000000..524addabf --- /dev/null +++ b/libc/include/inaddr.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _INADDR_H_ +#define _INADDR_H_ + +#include <stdint.h> + +typedef uint32_t in_addr_t; + +#endif diff --git a/libc/malloc_debug/DebugData.h b/libc/malloc_debug/DebugData.h index 13bba48df..e81bf404e 100644 --- a/libc/malloc_debug/DebugData.h +++ b/libc/malloc_debug/DebugData.h @@ -98,3 +98,9 @@ class DebugData { }; extern DebugData* g_debug; + +// The minimum and maximum allocation sizes +// for which backtrace will be recorded. +// They default to 0 and SIZE_MAX respectively +extern size_t g_min_alloc_to_record; +extern size_t g_max_alloc_to_record; diff --git a/libc/malloc_debug/PointerData.cpp b/libc/malloc_debug/PointerData.cpp index 4f81ff7ff..a15b1b13a 100644 --- a/libc/malloc_debug/PointerData.cpp +++ b/libc/malloc_debug/PointerData.cpp @@ -197,7 +197,10 @@ void PointerData::Add(const void* ptr, size_t pointer_size) { uintptr_t pointer = reinterpret_cast<uintptr_t>(ptr); size_t hash_index = 0; if (backtrace_enabled_) { - hash_index = AddBacktrace(g_debug->config().backtrace_frames()); + if ((pointer_size >= g_min_alloc_to_record) && + (pointer_size <= g_max_alloc_to_record)) { + hash_index = AddBacktrace(g_debug->config().backtrace_frames()); + } } std::lock_guard<std::mutex> pointer_guard(pointer_mutex_); diff --git a/libc/malloc_debug/malloc_debug.cpp b/libc/malloc_debug/malloc_debug.cpp index 609f030bf..b27853a9a 100644 --- a/libc/malloc_debug/malloc_debug.cpp +++ b/libc/malloc_debug/malloc_debug.cpp @@ -39,6 +39,8 @@ #include <sys/syscall.h> #include <unistd.h> +#include <sys/system_properties.h> + #include <mutex> #include <vector> @@ -66,6 +68,10 @@ DebugData* g_debug; bool* g_zygote_child; const MallocDispatch* g_dispatch; + +size_t g_min_alloc_to_record = 0; +size_t g_max_alloc_to_record = SIZE_MAX; + // ------------------------------------------------------------------------ // ------------------------------------------------------------------------ @@ -325,6 +331,23 @@ bool debug_initialize(const MallocDispatch* malloc_dispatch, bool* zygote_child, // of different error cases. 
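
The lines that follow read libc.debug.malloc.minalloctorecord / maxalloctorecord with __system_property_get into 10-byte buffers and parse them with atoi. A slightly more defensive variant (a sketch, not part of the patch) would size the buffer at PROP_VALUE_MAX — __system_property_get may write up to that many bytes — and use strtoull so values above INT_MAX survive on 64-bit:

    #include <sys/system_properties.h>
    #include <cstddef>
    #include <cstdlib>

    // Hypothetical helper: parse a size_t system property with a full-size
    // buffer and strict numeric validation, falling back to a default.
    static size_t GetSizeProperty(const char* name, size_t default_value) {
      char value[PROP_VALUE_MAX];
      if (__system_property_get(name, value) <= 0) return default_value;
      char* end = nullptr;
      unsigned long long v = strtoull(value, &end, 10);
      if (end == value || *end != '\0') return default_value;
      return static_cast<size_t>(v);
    }

Typical use would be something like "adb shell setprop libc.debug.malloc.minalloctorecord 4096" before enabling backtrace mode, so only allocations in the interesting size band pay the backtrace cost.
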
backtrace_startup(); + char min_alloc_to_record[10]; + if (__system_property_get("libc.debug.malloc.minalloctorecord", min_alloc_to_record)) { + g_min_alloc_to_record = atoi(min_alloc_to_record); + } + + char max_alloc_to_record[10]; + if (__system_property_get("libc.debug.malloc.maxalloctorecord", max_alloc_to_record)) { + g_max_alloc_to_record = atoi(max_alloc_to_record); + } + + if (g_min_alloc_to_record > g_max_alloc_to_record) { + error_log("%s: min_alloc_to_record > max_alloc_to_record!," + "reverting back to default limits", getprogname()); + g_min_alloc_to_record = 0; + g_max_alloc_to_record = SIZE_MAX; + } + if (g_debug->config().options() & VERBOSE) { info_log("%s: malloc debug enabled", getprogname()); } |
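
Stepping back to the res_cache.c hunk earlier in this diff: DNS names compare case-insensitively (RFC 1035), so both the cache hash and the equality check must fold case or identical queries differing only in letter case miss the cache. The new memcasecmp helper and the tolower() in _dnsPacket_hashQName do exactly that, and the same hunk fixes _dnsPacket_hashBytes, which never decremented numBytes. For reference, a case-folded 32-bit FNV-1 step in its conventional form — multiply first, then XOR, i.e. (hash * prime) ^ byte — looks like this (sketch):

    #include <cctype>

    // Case-folded FNV-1 over one DNS label; 16777619 is the standard 32-bit
    // FNV prime (res_cache.c's FNV_MULT).
    static unsigned HashLabelNoCase(const unsigned char* p, int len, unsigned hash) {
      while (len-- > 0) {
        hash = (hash * 16777619u) ^ static_cast<unsigned>(tolower(*p++));
      }
      return hash;
    }
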