aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    Stefan Liebler <stli@linux.vnet.ibm.com>    2016-05-24 10:39:13 +0200
committer Stefan Liebler <stli@linux.vnet.ibm.com>    2016-05-24 10:39:13 +0200
commit    4c011268960c6f24650672597deed756f21ad363 (patch)
tree      c4c8aba80f0dbdd15fe0bbe2f9383dd6d0091b15 /sysdeps/s390/s390-32/memcpy.S
parent    S390: Do not call memcpy, memcmp, memset within libc.so via ifunc-plt. (diff)
download  glibc-4c011268960c6f24650672597deed756f21ad363.tar.gz
          glibc-4c011268960c6f24650672597deed756f21ad363.tar.bz2
          glibc-4c011268960c6f24650672597deed756f21ad363.zip
S390: Implement mempcpy with help of memcpy. [BZ #19765]
There exist optimized memcpy functions on s390, but no optimized mempcpy. This patch adds mempcpy entry points in memcpy.S files, which use the memcpy implementation. Now mempcpy itself is also an IFUNC function as memcpy is and the variants are listed in ifunc-impl-list.c. The s390 string.h does not define _HAVE_STRING_ARCH_mempcpy. Instead mempcpy string/string.h inlines memcpy() + n. If n is constant and small enough, GCC emits instructions like mvi or mvc and avoids the function call to memcpy. If n is not constant, then memcpy is called and n is added afterwards. If _HAVE_STRING_ARCH_mempcpy would be defined, mempcpy would be called in every case. According to PR70140 "Inefficient expansion of __builtin_mempcpy" (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70140) GCC should handle a call to mempcpy in the same way as memcpy. Then either the mempcpy macro in string/string.h has to be removed or _HAVE_STRING_ARCH_mempcpy has to be defined for S390. ChangeLog: [BZ #19765] * sysdeps/s390/mempcpy.S: New File. * sysdeps/s390/multiarch/mempcpy.c: Likewise. * sysdeps/s390/multiarch/Makefile (sysdep_routines): Add mempcpy. * sysdeps/s390/multiarch/ifunc-impl-list.c (__libc_ifunc_impl_list): Add mempcpy variants. * sysdeps/s390/s390-32/memcpy.S: Add mempcpy entry point. (memcpy): Adjust to be usable from mempcpy entry point. (__memcpy_mvcle): Likewise. * sysdeps/s390/s390-64/memcpy.S: Likewise. * sysdeps/s390/s390-32/multiarch/memcpy-s390.S: Add entry points ____mempcpy_z196, ____mempcpy_z10 and add __GI_ symbols for mempcpy. (__memcpy_z196): Adjust to be usable from mempcpy entry point. (__memcpy_z10): Likewise. * sysdeps/s390/s390-64/multiarch/memcpy-s390x.S: Likewise.
Diffstat (limited to 'sysdeps/s390/s390-32/memcpy.S')
-rw-r--r--  sysdeps/s390/s390-32/memcpy.S | 50
1 file changed, 30 insertions(+), 20 deletions(-)
diff --git a/sysdeps/s390/s390-32/memcpy.S b/sysdeps/s390/s390-32/memcpy.S
index 2ac51ab62e..6be5104b68 100644
--- a/sysdeps/s390/s390-32/memcpy.S
+++ b/sysdeps/s390/s390-32/memcpy.S
@@ -25,12 +25,23 @@
%r3 = address of source memory area
%r4 = number of bytes to copy. */
-#ifdef USE_MULTIARCH
-ENTRY(__memcpy_default)
-#else
-ENTRY(memcpy)
+ .text
+ENTRY(__mempcpy)
+ .machine "g5"
+ lr %r1,%r2 # Use as dest
+ la %r2,0(%r4,%r2) # Return dest + n
+ j .L_G5_start
+END(__mempcpy)
+#ifndef USE_MULTIARCH
+libc_hidden_def (__mempcpy)
+weak_alias (__mempcpy, mempcpy)
+libc_hidden_builtin_def (mempcpy)
#endif
+
+ENTRY(memcpy)
.machine "g5"
+ lr %r1,%r2 # r1: Use as dest ; r2: Return dest
+.L_G5_start:
st %r13,52(%r15)
.cfi_offset 13, -44
basr %r13,0
@@ -41,14 +52,13 @@ ENTRY(memcpy)
lr %r5,%r4
srl %r5,8
ltr %r5,%r5
- lr %r1,%r2
jne .L_G5_13
ex %r4,.L_G5_17-.L_G5_16(%r13)
.L_G5_4:
l %r13,52(%r15)
br %r14
.L_G5_13:
- chi %r5,4096 # Switch to mvcle for copies >1MB
+ chi %r5,4096 # Switch to mvcle for copies >1MB
jh __memcpy_mvcle
.L_G5_12:
mvc 0(256,%r1),0(%r3)
@@ -59,24 +69,24 @@ ENTRY(memcpy)
j .L_G5_4
.L_G5_17:
mvc 0(1,%r1),0(%r3)
-#ifdef USE_MULTIARCH
-END(__memcpy_default)
-#else
END(memcpy)
+#ifndef USE_MULTIARCH
libc_hidden_builtin_def (memcpy)
#endif
ENTRY(__memcpy_mvcle)
- # Using as standalone function will result in unexpected
- # results since the length field is incremented by 1 in order to
- # compensate the changes already done in the functions above.
- ahi %r4,1 # length + 1
- lr %r5,%r4 # source length
- lr %r4,%r3 # source address
- lr %r3,%r5 # destination length = source length
+ # Using as standalone function will result in unexpected
+ # results since the length field is incremented by 1 in order to
+ # compensate the changes already done in the functions above.
+ lr %r0,%r2 # backup return dest [ + n ]
+ ahi %r4,1 # length + 1
+ lr %r5,%r4 # source length
+ lr %r4,%r3 # source address
+ lr %r2,%r1 # destination address
+ lr %r3,%r5 # destination length = source length
.L_MVCLE_1:
- mvcle %r2,%r4,0 # thats it, MVCLE is your friend
- jo .L_MVCLE_1
- lr %r2,%r1 # return destination address
- br %r14
+ mvcle %r2,%r4,0 # thats it, MVCLE is your friend
+ jo .L_MVCLE_1
+ lr %r2,%r0 # return destination address
+ br %r14
END(__memcpy_mvcle)