[libc] Improve memcpy for ARM Cortex-M supporting unaligned accesses. #144872

Merged: 5 commits, Jun 26, 2025
4 changes: 3 additions & 1 deletion libc/src/__support/macros/optimization.h
@@ -10,7 +10,7 @@
#ifndef LLVM_LIBC_SRC___SUPPORT_MACROS_OPTIMIZATION_H
#define LLVM_LIBC_SRC___SUPPORT_MACROS_OPTIMIZATION_H

#include "src/__support/macros/attributes.h" // LIBC_INLINE
#include "src/__support/macros/attributes.h" // LIBC_INLINE
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/compiler.h" // LIBC_COMPILER_IS_CLANG

@@ -30,8 +30,10 @@ LIBC_INLINE constexpr bool expects_bool_condition(T value, T expected) {

#if defined(LIBC_COMPILER_IS_CLANG)
#define LIBC_LOOP_NOUNROLL _Pragma("nounroll")
#define LIBC_LOOP_UNROLL _Pragma("unroll")
#elif defined(LIBC_COMPILER_IS_GCC)
#define LIBC_LOOP_NOUNROLL _Pragma("GCC unroll 0")
#define LIBC_LOOP_UNROLL _Pragma("GCC unroll 2048")
#else
#error "Unhandled compiler"
#endif
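Note: `LIBC_LOOP_UNROLL` is the new unrolling counterpart of the existing `LIBC_LOOP_NOUNROLL`. A minimal usage sketch (hypothetical loop, not part of this patch): placed directly before a loop, the macro expands to `_Pragma("unroll")` under Clang and `_Pragma("GCC unroll 2048")` under GCC, asking the compiler to fully unroll a fixed trip-count loop.

LIBC_LOOP_UNROLL
for (size_t i = 0; i < 4; ++i) // trip count is a compile-time constant,
  dst[i] = src[i];             // so the compiler emits four copy operations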
1 change: 1 addition & 0 deletions libc/src/string/memory_utils/CMakeLists.txt
@@ -7,6 +7,7 @@ add_header_library(
aarch64/inline_memcpy.h
aarch64/inline_memmove.h
aarch64/inline_memset.h
arm/inline_memcpy.h
generic/aligned_access.h
generic/byte_per_byte.h
inline_bcmp.h
217 changes: 217 additions & 0 deletions libc/src/string/memory_utils/arm/inline_memcpy.h
@@ -0,0 +1,217 @@
//===-- Memcpy implementation for arm ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
#define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H

#include "src/__support/macros/attributes.h" // LIBC_INLINE
#include "src/__support/macros/optimization.h" // LIBC_LOOP_NOUNROLL
#include "src/string/memory_utils/utils.h" // memcpy_inline, distance_to_align

#include <stddef.h> // size_t
#include <stdint.h> // uint32_t, uintptr_t

// https://libc.llvm.org/compiler_support.html
// Support for [[likely]] / [[unlikely]]
// [X] GCC 12.2
// [X] Clang 12
// [ ] Clang 11
#define LIBC_ATTR_LIKELY [[likely]]
#define LIBC_ATTR_UNLIKELY [[unlikely]]

#if defined(LIBC_COMPILER_IS_CLANG)
#if LIBC_COMPILER_CLANG_VER < 1200
#undef LIBC_ATTR_LIKELY
#undef LIBC_ATTR_UNLIKELY
#define LIBC_ATTR_LIKELY
#define LIBC_ATTR_UNLIKELY
#endif
#endif

namespace LIBC_NAMESPACE_DECL {

namespace {

LIBC_INLINE_VAR constexpr size_t kWordSize = sizeof(uint32_t);
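
// Selects how copy_and_bump_pointers (below) is allowed to touch memory:
// - AssumeUnaligned: no alignment assumption; relies on hardware support for
// unaligned single loads/stores.
// - AssumeWordAligned: both pointers are word-aligned; the compiler may merge
// accesses into load/store multiple (LDM, STM).
// - ForceWordLdStChain: individual 4-byte loads/stores only, never LDM / STM
// or LDRD / STRD.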

enum Strategy {
ForceWordLdStChain,
AssumeWordAligned,
AssumeUnaligned,
};

template <size_t bytes, Strategy strategy = AssumeUnaligned>
LIBC_INLINE void copy_and_bump_pointers(Ptr &dst, CPtr &src) {
if constexpr (strategy == AssumeUnaligned) {
memcpy_inline<bytes>(assume_aligned<1>(dst), assume_aligned<1>(src));
} else if constexpr (strategy == AssumeWordAligned) {
static_assert(bytes >= kWordSize);
memcpy_inline<bytes>(assume_aligned<kWordSize>(dst),
assume_aligned<kWordSize>(src));
} else if constexpr (strategy == ForceWordLdStChain) {
// We restrict loads/stores to 4 bytes to prevent the use of load/store
// multiple (LDM, STM) and load/store double (LDRD, STRD). First, they may
// fault (see notes below) and second, they use more registers which in turn
// adds push/pop instructions in the hot path.
static_assert((bytes % kWordSize == 0) && (bytes >= kWordSize));
LIBC_LOOP_UNROLL
for (size_t i = 0; i < bytes / kWordSize; ++i) {
const size_t offset = i * kWordSize;
memcpy_inline<kWordSize>(dst + offset, src + offset);
}
}
// In the 1, 2, 4 byte copy case, the compiler can fold pointer offsetting
// into the load/store instructions.
// e.g.,
// ldrb r3, [r1], #1
// strb r3, [r0], #1
dst += bytes;
src += bytes;
}
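
// To make the trade-off above concrete: an 8-byte copy roughly lowers to the
// following (illustrative codegen only, actual output depends on compiler and
// flags):
//   AssumeWordAligned:  ldm r1!, {r2, r3}
//                       stm r0!, {r2, r3}
//   ForceWordLdStChain: ldr r2, [r1]     ; four single word accesses, each
//                       str r2, [r0]     ; safe even when unaligned on cores
//                       ldr r2, [r1, #4] ; with hardware unaligned support
//                       str r2, [r0, #4]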

LIBC_INLINE void copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src,
const size_t size) {
LIBC_LOOP_NOUNROLL
for (size_t i = 0; i < size; ++i)
*dst++ = *src++;
}

template <size_t block_size, Strategy strategy>
LIBC_INLINE void copy_blocks_and_update_args(Ptr &dst, CPtr &src,
size_t &size) {
LIBC_LOOP_NOUNROLL
for (size_t i = 0; i < size / block_size; ++i)
copy_and_bump_pointers<block_size, strategy>(dst, src);
// Update `size` once at the end instead of once per iteration.
size %= block_size;
}
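
// Note on the helpers below: OR-ing the two pointers lets a single
// `misaligned` test answer "is either pointer misaligned?" instead of
// testing each pointer separately.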

LIBC_INLINE CPtr bitwise_or(CPtr a, CPtr b) {
return cpp::bit_cast<CPtr>(cpp::bit_cast<uintptr_t>(a) |
cpp::bit_cast<uintptr_t>(b));
}

LIBC_INLINE auto misaligned(CPtr a) {
return distance_to_align_down<kWordSize>(a);
}

} // namespace

// Implementation for Cortex-M0, M0+, M1.
// Notes:
// - It compiles down to 196 bytes, but 220 bytes when used through `memcpy`
// that also needs to return the `dst` ptr.
// - These cores do not allow for unaligned loads/stores.
// - When `src` and `dst` are coaligned, we start by aligning them and then
// perform bulk copies. We let the compiler know the pointers are aligned so
// it can use load/store multiple (LDM, STM). This significantly increases
// throughput but also requires more registers and push/pop instructions,
// which impacts latency for small size copies.
// - When `src` and `dst` are misaligned, we align `dst` and recompose words
// using multiple aligned loads. `load_aligned` takes care of endianness
// issues.
[[maybe_unused]] LIBC_INLINE void inline_memcpy_arm_low_end(Ptr dst, CPtr src,
size_t size) {
if (size >= 8) {
if (const size_t offset = distance_to_align_up<kWordSize>(dst))
LIBC_ATTR_UNLIKELY {
copy_bytes_and_bump_pointers(dst, src, offset);
size -= offset;
}
const auto src_alignment = distance_to_align_down<kWordSize>(src);
if (src_alignment == 0)
LIBC_ATTR_LIKELY {
// Both `src` and `dst` are now word-aligned.
copy_blocks_and_update_args<64, AssumeWordAligned>(dst, src, size);
copy_blocks_and_update_args<16, AssumeWordAligned>(dst, src, size);
copy_blocks_and_update_args<4, AssumeWordAligned>(dst, src, size);
}
else {
// `dst` is aligned but `src` is not.
LIBC_LOOP_NOUNROLL
while (size >= kWordSize) {
// Recompose word from multiple loads depending on the alignment.
const uint32_t value =
src_alignment == 2
? load_aligned<uint32_t, uint16_t, uint16_t>(src)
: load_aligned<uint32_t, uint8_t, uint16_t, uint8_t>(src);
memcpy_inline<kWordSize>(assume_aligned<kWordSize>(dst), &value);
dst += kWordSize;
src += kWordSize;
size -= kWordSize;
}
}
// Up to 3 bytes may still need to be copied.
// They are handled by the byte-per-byte loop below.
}
copy_bytes_and_bump_pointers(dst, src, size);
}
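
// A hedged sketch of the word recomposition above for the `src_alignment == 2`
// case, assuming a little-endian core (`load_aligned` in utils.h handles both
// endiannesses; this is not its actual implementation):
//   uint16_t lo, hi;
//   memcpy_inline<2>(&lo, src);     // aligned half-word load
//   memcpy_inline<2>(&hi, src + 2); // aligned half-word load
//   uint32_t value = uint32_t(lo) | (uint32_t(hi) << 16);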

// Implementation for Cortex-M3, M4, M7, M23, M33, M35P, M52 with hardware
// support for unaligned loads and stores.
// Notes:
// - It compiles down to 266 bytes.
// - `dst` and `src` are not `__restrict` to prevent the compiler from
// reordering loads/stores.
// - We keep state variables to a strict minimum to keep everything in the free
// registers and prevent costly push / pop.
// - Even when unaligned single loads/stores to normal memory are supported,
// unaligned accesses for load/store multiple (LDM, STM) and load/store double
// (LDRD, STRD) instructions are generally not supported and will still fault,
// so we make sure to restrict unrolling to word loads/stores.
[[maybe_unused]] LIBC_INLINE void inline_memcpy_arm_mid_end(Ptr dst, CPtr src,
size_t size) {
if (misaligned(bitwise_or(src, dst)))
LIBC_ATTR_UNLIKELY {
if (size < 8)
LIBC_ATTR_UNLIKELY {
if (size & 1)
copy_and_bump_pointers<1>(dst, src);
if (size & 2)
copy_and_bump_pointers<2>(dst, src);
if (size & 4)
copy_and_bump_pointers<4>(dst, src);
return;
}
if (misaligned(src))
LIBC_ATTR_UNLIKELY {
const size_t offset = distance_to_align_up<kWordSize>(dst);
if (offset & 1)
copy_and_bump_pointers<1>(dst, src);
if (offset & 2)
copy_and_bump_pointers<2>(dst, src);
size -= offset;
}
}
copy_blocks_and_update_args<64, ForceWordLdStChain>(dst, src, size);
copy_blocks_and_update_args<16, ForceWordLdStChain>(dst, src, size);
copy_blocks_and_update_args<4, AssumeUnaligned>(dst, src, size);
if (size & 1)
copy_and_bump_pointers<1>(dst, src);
if (size & 2)
LIBC_ATTR_UNLIKELY
copy_and_bump_pointers<2>(dst, src);
}

[[maybe_unused]] LIBC_INLINE void inline_memcpy_arm(void *__restrict dst_,
const void *__restrict src_,
size_t size) {
Ptr dst = cpp::bit_cast<Ptr>(dst_);
CPtr src = cpp::bit_cast<CPtr>(src_);
#ifdef __ARM_FEATURE_UNALIGNED
return inline_memcpy_arm_mid_end(dst, src, size);
#else
return inline_memcpy_arm_low_end(dst, src, size);
#endif
}
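
// Which of the two implementations above gets selected is toolchain-dependent.
// A quick, hypothetical way to check (exact flags vary by toolchain):
//   $ arm-none-eabi-gcc -mcpu=cortex-m0 -dM -E - </dev/null | grep UNALIGNED
//     (no output: the low-end path is used)
//   $ arm-none-eabi-gcc -mcpu=cortex-m7 -dM -E - </dev/null | grep UNALIGNED
//     #define __ARM_FEATURE_UNALIGNED 1   (the mid-end path is used)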

} // namespace LIBC_NAMESPACE_DECL

// Cleanup local macros
#undef LIBC_ATTR_LIKELY
#undef LIBC_ATTR_UNLIKELY

#endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
3 changes: 3 additions & 0 deletions libc/src/string/memory_utils/inline_memcpy.h
@@ -22,6 +22,9 @@
#include "src/string/memory_utils/x86_64/inline_memcpy.h"
#define LIBC_SRC_STRING_MEMORY_UTILS_MEMCPY \
inline_memcpy_x86_maybe_interpose_repmovsb
#elif defined(LIBC_TARGET_ARCH_IS_ARM)
#include "src/string/memory_utils/arm/inline_memcpy.h"
#define LIBC_SRC_STRING_MEMORY_UTILS_MEMCPY inline_memcpy_arm
#elif defined(LIBC_TARGET_ARCH_IS_AARCH64)
#include "src/string/memory_utils/aarch64/inline_memcpy.h"
#define LIBC_SRC_STRING_MEMORY_UTILS_MEMCPY inline_memcpy_aarch64
2 changes: 1 addition & 1 deletion libc/src/string/memory_utils/utils.h
@@ -101,7 +101,7 @@ LIBC_INLINE void memcpy_inline(void *__restrict dst,
}

using Ptr = cpp::byte *; // Pointer to raw data.
-using CPtr = const cpp::byte *; // Const pointer to raw data.
+using CPtr = const cpp::byte *; // Pointer to const raw data.

// This type makes sure that we don't accidentally promote an integral type to
// another one. It is only constructible from the exact T type.
1 change: 1 addition & 0 deletions utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -4218,6 +4218,7 @@ libc_support_library(
"src/string/memory_utils/aarch64/inline_memcpy.h",
"src/string/memory_utils/aarch64/inline_memmove.h",
"src/string/memory_utils/aarch64/inline_memset.h",
"src/string/memory_utils/arm/inline_memcpy.h",
"src/string/memory_utils/generic/aligned_access.h",
"src/string/memory_utils/generic/byte_per_byte.h",
"src/string/memory_utils/inline_bcmp.h",