ARC: Use new definitions for optional ARC CPU features

GCC for ARC has been updated to provide consistent naming of preprocessor
definitions for different optional architecture features:

    * __ARC_BARREL_SHIFTER__ instead of __Xbarrel_shifter for
      -mbarrel-shifter
    * __ARC_LL64__ instead of __LL64__ for -mll64
    * __ARCEM__ instead of __EM__ for -mcpu=arcem
    * __ARCHS__ instead of __HS__ for -mcpu=archs
    * etc. (not used in newlib)

This patch updates the ARC assembly routines to use the new definitions
instead of the deprecated ones. To remain compatible with older compilers,
the new definitions are also provided in asm.h when needed, derived from
the deprecated preprocessor definitions.
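
For illustration, a feature test that used to be written against the old
macro (a minimal sketch, not a line from this patch):

    #ifdef __HS__            /* deprecated spelling for -mcpu=archs */
    /* ... ARC HS code ... */
    #endif

now becomes:

    #ifdef __ARCHS__         /* consistent spelling; see asm.h below for
                                the fallback when only __HS__ is defined */
    /* ... ARC HS code ... */
    #endif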

*** newlib/ChangeLog ***
2015-12-15  Anton Kolesov  <Anton.Kolesov@synopsys.com>

	* libc/machine/arc/asm.h: Define new GCC definitions for old compilers.
	* libc/machine/arc/memcmp-bs-norm.S: Use new GCC defines to detect
	  processor features.
	* libc/machine/arc/memcmp.S: Likewise.
	* libc/machine/arc/memcpy-archs.S: Likewise.
	* libc/machine/arc/memcpy-bs.S: Likewise.
	* libc/machine/arc/memcpy.S: Likewise.
	* libc/machine/arc/memset-archs.S: Likewise.
	* libc/machine/arc/memset-bs.S: Likewise.
	* libc/machine/arc/memset.S: Likewise.
	* libc/machine/arc/setjmp.S: Likewise.
	* libc/machine/arc/strchr-bs-norm.S: Likewise.
	* libc/machine/arc/strchr-bs.S: Likewise.
	* libc/machine/arc/strchr.S: Likewise.
	* libc/machine/arc/strcmp-archs.S: Likewise.
	* libc/machine/arc/strcmp.S: Likewise.
	* libc/machine/arc/strcpy-bs-arc600.S: Likewise.
	* libc/machine/arc/strcpy-bs.S: Likewise.
	* libc/machine/arc/strcpy.S: Likewise.
	* libc/machine/arc/strlen-bs-norm.S: Likewise.
	* libc/machine/arc/strlen-bs.S: Likewise.
	* libc/machine/arc/strlen.S: Likewise.
	* libc/machine/arc/strncpy-bs.S: Likewise.
	* libc/machine/arc/strncpy.S: Likewise.

Signed-off-by: Anton Kolesov <Anton.Kolesov@synopsys.com>

newlib/ChangeLog

@@ -1,3 +1,30 @@
+2015-12-17  Anton Kolesov  <Anton.Kolesov@synopsys.com>
+
+	* libc/machine/arc/asm.h: Define new GCC definitions for old compilers.
+	* libc/machine/arc/memcmp-bs-norm.S: Use new GCC defines to detect
+	  processor features.
+	* libc/machine/arc/memcmp.S: Likewise.
+	* libc/machine/arc/memcpy-archs.S: Likewise.
+	* libc/machine/arc/memcpy-bs.S: Likewise.
+	* libc/machine/arc/memcpy.S: Likewise.
+	* libc/machine/arc/memset-archs.S: Likewise.
+	* libc/machine/arc/memset-bs.S: Likewise.
+	* libc/machine/arc/memset.S: Likewise.
+	* libc/machine/arc/setjmp.S: Likewise.
+	* libc/machine/arc/strchr-bs-norm.S: Likewise.
+	* libc/machine/arc/strchr-bs.S: Likewise.
+	* libc/machine/arc/strchr.S: Likewise.
+	* libc/machine/arc/strcmp-archs.S: Likewise.
+	* libc/machine/arc/strcmp.S: Likewise.
+	* libc/machine/arc/strcpy-bs-arc600.S: Likewise.
+	* libc/machine/arc/strcpy-bs.S: Likewise.
+	* libc/machine/arc/strcpy.S: Likewise.
+	* libc/machine/arc/strlen-bs-norm.S: Likewise.
+	* libc/machine/arc/strlen-bs.S: Likewise.
+	* libc/machine/arc/strlen.S: Likewise.
+	* libc/machine/arc/strncpy-bs.S: Likewise.
+	* libc/machine/arc/strncpy.S: Likewise.
+
 2015-12-17  Corinna Vinschen  <corinna@vinschen.de>
 
 	* libc/include/sys/types.h: Remove including <sys/select.h>.

newlib/libc/machine/arc/asm.h

@@ -61,4 +61,22 @@
 #define bcc_s bhs_s
+/* Compatibility with older ARC GCC, that doesn't provide some of the
+   preprocessor defines used by newlib for ARC. */
+#if defined (__Xbarrel_shifter) && !defined (__ARC_BARREL_SHIFTER__)
+#define __ARC_BARREL_SHIFTER__ 1
+#endif
+#if defined (__EM__) && !defined (__ARCEM__)
+#define __ARCEM__ 1
+#endif
+#if defined (__HS__) && !defined (__ARCHS__)
+#define __ARCHS__ 1
+#endif
+#if defined (__LL64__) && !defined (__ARC_LL64__)
+#define __ARC_LL64__ 1
+#endif
 #endif /* ARC_NEWLIB_ASM_H */
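
With the fallback block above, the assembly sources can test only the new
macros and still build with an older GCC. A minimal usage sketch (not part
of the patch; strcmp-archs.S below uses exactly this guard):

    #include "asm.h"
    #ifdef __ARCHS__    /* predefined by new GCC for -mcpu=archs, or
                           derived by asm.h from the deprecated __HS__ */
    /* ARC HS implementation */
    #endif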

newlib/libc/machine/arc/memcmp-bs-norm.S

@@ -35,7 +35,9 @@
 #include "asm.h"
-#if !defined (__ARC601__) && defined (__ARC_NORM__) && defined (__Xbarrel_shifter)
+#if !defined (__ARC601__) && defined (__ARC_NORM__) \
+    && defined (__ARC_BARREL_SHIFTER__)
 #ifdef __LITTLE_ENDIAN__
 #define WORD2 r2
 #define SHIFT r3
@@ -47,7 +49,7 @@
 ENTRY (memcmp)
 	or	r12,r0,r1
 	asl_s	r12,r12,30
-#if defined (__ARC700__) || defined (__EM__) || defined (__HS__)
+#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
 	sub_l	r3,r2,1
 	brls	r2,r12,.Lbytewise
 #else
@@ -57,7 +59,7 @@ ENTRY (memcmp)
 	ld	r4,[r0,0]
 	ld	r5,[r1,0]
 	lsr.f	lp_count,r3,3
-#ifdef __EM__
+#ifdef __ARCEM__
 	/* A branch can't be the last instruction in a zero overhead loop.
 	   So we move the branch to the start of the loop, duplicate it
 	   after the end, and set up r12 so that the branch isn't taken
@@ -74,12 +76,12 @@ ENTRY (memcmp)
 	brne	r4,r5,.Leven
 	ld.a	r4,[r0,8]
 	ld.a	r5,[r1,8]
-#ifdef __EM__
+#ifdef __ARCEM__
 .Loop_end:
 	brne	WORD2,r12,.Lodd
 #else
 	brne	WORD2,r12,.Lodd
-#ifdef __HS__
+#ifdef __ARCHS__
 	nop
 #endif
 .Loop_end:
@@ -90,7 +92,7 @@ ENTRY (memcmp)
 	ld	r4,[r0,4]
 	ld	r5,[r1,4]
 #ifdef __LITTLE_ENDIAN__
-#if defined (__ARC700__) || defined (__EM__) || defined (__HS__)
+#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
 	nop_s
 	; one more load latency cycle
 .Last_cmp:
@@ -167,14 +169,14 @@ ENTRY (memcmp)
 	bset.cs	r0,r0,31
 .Lodd:
 	cmp_s	WORD2,r12
-#if defined (__ARC700__) || defined (__EM__) || defined (__HS__)
+#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
 	mov_s	r0,1
 	j_s.d	[blink]
 	bset.cs	r0,r0,31
-#else /* !__ARC700__ */
+#else
 	j_s.d	[blink]
 	rrc	r0,2
-#endif /* !__ARC700__ */
+#endif /* __ARC700__ || __ARCEM__ || __ARCHS__ */
 #endif /* ENDIAN */
 	.balign	4
 .Lbytewise:
@@ -182,7 +184,7 @@ ENTRY (memcmp)
 	ldb	r4,[r0,0]
 	ldb	r5,[r1,0]
 	lsr.f	lp_count,r3
-#ifdef __EM__
+#ifdef __ARCEM__
 	mov	r12,r3
 	lpne	.Lbyte_end
 	brne	r3,r12,.Lbyte_odd
@@ -194,12 +196,12 @@ ENTRY (memcmp)
 	brne	r4,r5,.Lbyte_even
 	ldb.a	r4,[r0,2]
 	ldb.a	r5,[r1,2]
-#ifdef __EM__
+#ifdef __ARCEM__
 .Lbyte_end:
 	brne	r3,r12,.Lbyte_odd
 #else
 	brne	r3,r12,.Lbyte_odd
-#ifdef __HS__
+#ifdef __ARCHS__
 	nop
 #endif
 .Lbyte_end:
@@ -218,6 +220,6 @@ ENTRY (memcmp)
 	j_s.d	[blink]
 	mov_l	r0,0
 ENDFUNC (memcmp)
-#endif /* !__ARC601__ && __ARC_NORM__ && __Xbarrel_shifter */
+#endif /* !__ARC601__ && __ARC_NORM__ && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
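
Aside: the __ARCEM__-only paths in this file implement the workaround
described in the source comment above: on ARC EM, a branch can't be the
last instruction of a zero-overhead loop. A distilled sketch of the
byte-loop pattern, built only from lines that appear in the hunks above;
the arrangement is illustrative:

    	mov	r12,r3              ; r12 == r3, so the first brne is not taken
    	lpne	.Lbyte_end          ; zero-overhead loop ends before .Lbyte_end
    	brne	r3,r12,.Lbyte_odd   ; branch moved to the start of the body
    	...                         ; rest of the loop body
    .Lbyte_end:
    	brne	r3,r12,.Lbyte_odd   ; duplicate of the branch after the loop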

newlib/libc/machine/arc/memcmp.S

@@ -35,7 +35,8 @@
 #include "asm.h"
-#if defined (__ARC601__) || !defined (__ARC_NORM__) || !defined (__Xbarrel_shifter)
+#if defined (__ARC601__) || !defined (__ARC_NORM__) \
+    || !defined (__ARC_BARREL_SHIFTER__)
 /* Addresses are unsigned, and at 0 is the vector table, so it's OK to assume
    that we can subtract 8 from a source end address without underflow. */
@@ -148,6 +149,6 @@ ENTRY (memcmp)
 	j_s.d	[blink]
 	mov_s	r0,0
 ENDFUNC (memcmp)
-#endif /* __ARC601__ || !__ARC_NORM__ || !__Xbarrel_shifter */
+#endif /* __ARC601__ || !__ARC_NORM__ || !__ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memcpy-archs.S

@@ -35,7 +35,7 @@
 #include "asm.h"
-#if defined (__HS__)
+#if defined (__ARCHS__)
 #ifdef __LITTLE_ENDIAN__
 # define SHIFT_1(RX,RY,IMM)	asl	RX, RY, IMM	; <<
@@ -53,7 +53,7 @@
 # define EXTRACT_2(RX,RY,IMM)	lsr	RX, RY, 0x08
 #endif
-#ifdef __LL64__
+#ifdef __ARC_LL64__
 # define PREFETCH_READ(RX)	prefetch [RX, 56]
 # define PREFETCH_WRITE(RX)	prefetchw [RX, 64]
 # define LOADX(DST,RX)	ldd.ab DST, [RX, 8]
@@ -263,6 +263,6 @@ ENTRY (memcpy)
 	j	[blink]
 ENDFUNC (memcpy)
-#endif /* __HS__ */
+#endif /* __ARCHS__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memcpy-bs.S

@@ -35,7 +35,9 @@
 #include "asm.h"
-#if !defined (__ARC601__) && !defined (__HS__) && defined (__Xbarrel_shifter)
+#if !defined (__ARC601__) && !defined (__ARCHS__) \
+    && defined (__ARC_BARREL_SHIFTER__)
 /* Mostly optimized for ARC700, but not bad for ARC600 either. */
 /* This memcpy implementation does not support objects of 1GB or larger -
    the check for alignment does not work then. */
@@ -98,6 +100,6 @@ ENTRY (memcpy)
 	j_s.d	[blink]
 	stb	r12,[r5,0]
 ENDFUNC (memcpy)
-#endif /* !__ARC601__ && !__HS__ && __Xbarrel_shifter */
+#endif /* !__ARC601__ && !__ARCHS__ && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memcpy.S

@@ -35,7 +35,9 @@
 #include "asm.h"
-#if defined (__ARC601__) || (!defined (__Xbarrel_shifter) && !defined (__HS__))
+#if defined (__ARC601__) || \
+    (!defined (__ARC_BARREL_SHIFTER__) && !defined (__ARCHS__))
 /* Adapted from memcpy-bs.S. */
 /* We assume that most sources and destinations are aligned, and
    that also lengths are mostly a multiple of four, although to a lesser
@@ -104,6 +106,6 @@ ENTRY (memcpy)
 	j_s.d	[blink]
 	stb	r12,[r5,0]
 ENDFUNC (memcpy)
-#endif /* __ARC601__ || (!__Xbarrel_shifter && !__HS__) */
+#endif /* __ARC601__ || (!__ARC_BARREL_SHIFTER__ && !__ARCHS__) */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memset-archs.S

@@ -35,7 +35,7 @@
 #include "asm.h"
-#ifdef __HS__
+#ifdef __ARCHS__
 #ifdef USE_PREFETCH
 #define PREWRITE(A,B)	prefetchw [(A),(B)]
@@ -81,7 +81,7 @@ ENTRY (memset)
 	lpnz	@.Lset64bytes
 	; LOOP START
 	PREWRITE (r3, 64)	;Prefetch the next write location
-#ifdef __LL64__
+#ifdef __ARC_LL64__
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -114,7 +114,7 @@ ENTRY (memset)
 	lpnz	.Lset32bytes
 	; LOOP START
 	prefetchw [r3, 32]	;Prefetch the next write location
-#ifdef __LL64__
+#ifdef __ARC_LL64__
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -141,6 +141,6 @@ ENTRY (memset)
 	j	[blink]
 ENDFUNC (memset)
-#endif /* __HS__ */
+#endif /* __ARCHS__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memset-bs.S

@@ -42,7 +42,7 @@
 	better would be to avoid a second entry point into function. ARC HS always
 	has barrel-shifter, so this implementation will be always used for this
 	purpose. */
-#if !defined (__ARC601__) && defined (__Xbarrel_shifter)
+#if !defined (__ARC601__) && defined (__ARC_BARREL_SHIFTER__)
 /* To deal with alignment/loop issues, SMALL must be at least 2. */
 #define SMALL 7
@@ -57,12 +57,12 @@
 	cases, because the copying of a string presumably leaves start address
 	and length alignment for the zeroing randomly distributed. */
-#ifdef __HS__
+#ifdef __ARCHS__
 ENTRY (__dummy_memset)
 #else
 ENTRY (memset)
 #endif
-#if !defined (__ARC700__) && !defined (__EM__)
+#if !defined (__ARC700__) && !defined (__ARCEM__)
 #undef SMALL
 #define SMALL 8 /* Even faster if aligned. */
 	brls.d	r2,SMALL,.Ltiny
@@ -74,7 +74,7 @@ ENTRY (memset)
 	asl	r12,r1,8
 	beq.d	.Laligned
 	or_s	r1,r1,r12
-#if defined (__ARC700__) || defined (__EM__)
+#if defined (__ARC700__) || defined (__ARCEM__)
 	brls	r2,SMALL,.Ltiny
 #endif
 .Lnot_tiny:
@@ -90,7 +90,7 @@ ENTRY (memset)
 	stw.ab	r1,[r3,2]
 	bclr_s	r3,r3,1
 .Laligned:	; This code address should be aligned for speed.
-#if defined (__ARC700__) || defined (__EM__)
+#if defined (__ARC700__) || defined (__ARCEM__)
 	asl	r12,r1,16
 	lsr.f	lp_count,r2,2
 	or_s	r1,r1,r12
@@ -111,7 +111,7 @@ ENTRY (memset)
 	st_s	r1,[r3]
 #endif /* !__ARC700 */
-#if defined (__ARC700__) || defined (__EM__)
+#if defined (__ARC700__) || defined (__ARCEM__)
 	.balign	4
 __strncpy_bzero:
 	brhi.d	r2,17,.Lnot_tiny
@@ -144,11 +144,11 @@ __strncpy_bzero:
 	stb_s	r1,[r3]
 	j_s	[blink]
 #endif /* !__ARC700 */
-#ifdef __HS__
+#ifdef __ARCHS__
 ENDFUNC (__dummy_memset)
 #else
 ENDFUNC (memset)
 #endif
-#endif /* !__ARC601__ && __Xbarrel_shifter */
+#endif /* !__ARC601__ && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memset.S

@@ -35,7 +35,8 @@
 #include "asm.h"
-#if defined (__ARC601__) || (!defined (__Xbarrel_shifter) && !defined (__HS__))
+#if defined (__ARC601__) \
+    || (!defined (__ARC_BARREL_SHIFTER__) && !defined (__ARCHS__))
 /* To deal with alignment/loop issues, SMALL must be at least 2. */
 #define SMALL 8 /* Even faster if aligned. */
@@ -104,6 +105,6 @@ __strncpy_bzero:
 	stb_s	r1,[r3]
 	j_s	[blink]
 ENDFUNC (memset)
-#endif /* __ARC601__ || (!__Xbarrel_shifter && !__HS__) */
+#endif /* __ARC601__ || (!__ARC_BARREL_SHIFTER__ && !__ARCHS__) */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/setjmp.S

@@ -92,7 +92,7 @@ setjmp:
 	st	r2, [r0, ABIlps]
 	st	r3, [r0, ABIlpe]
-#if (!defined (__A7__) && !defined (__EM__) && !defined (__HS__))
+#if (!defined (__ARC700__) && !defined (__ARCEM__) && !defined (__ARCHS__))
 	; Till the configure changes are decided, and implemented, the code working on
 	; mlo/mhi and using mul64 should be disabled.
 	; st mlo, [r0, ABImlo]
@@ -145,7 +145,7 @@ longjmp:
 	sr	r2, [lp_start]
 	sr	r3, [lp_end]
-#if (!defined (__A7__) && !defined (__EM__) && !defined (__HS__))
+#if (!defined (__ARC700__) && !defined (__ARCEM__) && !defined (__ARCHS__))
 	ld	r2, [r0, ABImlo]
 	ld	r3, [r0, ABImhi]
 	; We do not support restoring of mulhi and mlo registers, yet.

newlib/libc/machine/arc/strchr-bs-norm.S

@@ -39,8 +39,8 @@
 	words branch-free. */
 #include "asm.h"
-#if (defined (__ARC700__) || defined (__EM__) || defined (__HS__)) \
-    && defined (__ARC_NORM__) && defined (__Xbarrel_shifter)
+#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
+    && defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__)
 ENTRY (strchr)
 	extb_s	r1,r1
@@ -160,6 +160,7 @@ ENTRY (strchr)
 	mov.mi	r0,0
 #endif /* ENDIAN */
 ENDFUNC (strchr)
-#endif /* (__ARC700__ || __EM__ || __HS__) && __ARC_NORM__ && __Xbarrel_shifter */
+#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_NORM__
+	  && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strchr-bs.S

@@ -49,8 +49,9 @@
 	Each byte in Y is 0x80 if the the corresponding byte in
 	W is zero, otherwise that byte of Y is 0. */
-#if defined (__Xbarrel_shifter) && \
+#if defined (__ARC_BARREL_SHIFTER__) && \
     (defined (__ARC600__) || (!defined (__ARC_NORM__) && !defined (__ARC601__)))
 ENTRY (strchr)
 	bmsk.f	r2,r0,1
 	mov_s	r3,0x01010101
@@ -195,6 +196,7 @@ ENTRY (strchr)
 	add.eq	r0,r0,1
 #endif /* ENDIAN */
 ENDFUNC (strchr)
-#endif /* __Xbarrel_shifter && (__ARC600__ || (!__ARC_NORM__ && !__ARC601__)) */
+#endif /* __ARC_BARREL_SHIFTER__ &&
+	  (__ARC600__ || (!__ARC_NORM__ && !__ARC601__)) */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strchr.S

@@ -49,7 +49,7 @@
 	Each byte in Y is 0x80 if the the corresponding byte in
 	W is zero, otherwise that byte of Y is 0. */
-#if defined (__ARC601__) || !defined (__Xbarrel_shifter)
+#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
 ENTRY (strchr)
 	bmsk.f	r2,r0,1
 	mov_s	r3,0x01010101
@@ -203,6 +203,6 @@ ENTRY (strchr)
 	add.eq	r0,r0,1
 #endif /* ENDIAN */
 ENDFUNC (strchr)
-#endif /* __ARC601__ || !__Xbarrel_shifter */
+#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strcmp-archs.S

@@ -35,7 +35,7 @@
 #include "asm.h"
-#ifdef __HS__
+#ifdef __ARCHS__
 ENTRY (strcmp)
 	or	r2, r0, r1
 	bmsk_s	r2, r2, 1
@@ -104,6 +104,6 @@ ENTRY (strcmp)
 	j_s.d	[blink]
 	sub	r0, r2, r3
 ENDFUNC (strcmp)
-#endif /* __HS__ */
+#endif /* __ARCHS__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strcmp.S

@@ -41,7 +41,7 @@
 	by a factor of two, and speculatively loading the second word / byte of
 	source 1; however, that would increase the overhead for loop setup / finish,
 	and strcmp might often terminate early. */
-#ifndef __HS__
+#ifndef __ARCHS__
 ENTRY (strcmp)
 	or	r2,r0,r1
@@ -128,6 +128,6 @@ ENTRY (strcmp)
 	j_s.d	[blink]
 	sub	r0,r2,r3
 ENDFUNC (strcmp)
-#endif /* !__HS__ */
+#endif /* !__ARCHS__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strcpy-bs-arc600.S

@@ -35,7 +35,7 @@
 #include "asm.h"
-#if defined (__ARC600__) && defined (__Xbarrel_shifter)
+#if defined (__ARC600__) && defined (__ARC_BARREL_SHIFTER__)
 /* If dst and src are 4 byte aligned, copy 8 bytes at a time.
    If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
    it 8 byte aligned. Thus, we can do a little read-ahead, without
@@ -115,6 +115,6 @@ ENTRY (strcpy)
 	stb.ab	r3,[r10,1]
 	j	[blink]
 ENDFUNC (strcpy)
-#endif /* __ARC600__ && __Xbarrel_shifter */
+#endif /* __ARC600__ && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strcpy-bs.S

@@ -35,8 +35,8 @@
 #include "asm.h"
-#if (defined (__ARC700__) || defined (__EM__) || defined (__HS__)) \
-    && defined (__Xbarrel_shifter)
+#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
+    && defined (__ARC_BARREL_SHIFTER__)
 /* If dst and src are 4 byte aligned, copy 8 bytes at a time.
    If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
@@ -98,6 +98,6 @@ charloop:
 	stb.ab	r3,[r10,1]
 	j	[blink]
 ENDFUNC (strcpy)
-#endif /* (__ARC700__ || __EM__ || __HS__) && __Xbarrel_shifter */
+#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strcpy.S

@@ -35,7 +35,7 @@
 #include "asm.h"
-#if defined (__ARC601__) || !defined (__Xbarrel_shifter)
+#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
 /* If dst and src are 4 byte aligned, copy 8 bytes at a time.
    If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
    it 8 byte aligned. Thus, we can do a little read-ahead, without
@@ -85,6 +85,6 @@ ENTRY (strcpy)
 	stb.ab	r3,[r10,1]
 	j_s	[blink]
 ENDFUNC (strcpy)
-#endif /* __ARC601__ || !__Xbarrel_shifter */
+#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strlen-bs-norm.S

@@ -34,8 +34,8 @@
 #if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
 #include "asm.h"
-#if (defined (__ARC700__) || defined (__EM__) || defined (__HS__)) \
-    && defined (__ARC_NORM__) && defined (__Xbarrel_shifter)
+#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
+    && defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__)
 ENTRY (strlen)
 	or	r3,r0,7
@@ -110,6 +110,7 @@ ENTRY (strlen)
 	b.d	.Lend
 	sub_s.ne r1,r1,r1
 ENDFUNC (strlen)
-#endif /* (__ARC700__ || __EM__ || __HS__) && __ARC_NORM__ && _Xbarrel_shifter */
+#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_NORM__
+	  && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strlen-bs.S

@@ -36,7 +36,7 @@
 #include "asm.h"
 #if (defined (__ARC600__) || !defined (__ARC_NORM__)) && !defined (__ARC601__) \
-    && defined (__Xbarrel_shifter)
+    && defined (__ARC_BARREL_SHIFTER__)
 /* This code is optimized for the ARC600 pipeline. */
 ENTRY (strlen)
@@ -117,6 +117,6 @@ ENTRY (strlen)
 	b.d	.Lend
 	sub_s.ne r1,r1,r1
 ENDFUNC (strlen)
-#endif /* (__ARC600__ || !__ARC_NORM__) && !__ARC601__ && __Xbarrel_shifter */
+#endif /* (__ARC600__ || !__ARC_NORM__) && !__ARC601__ && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strlen.S

@@ -35,7 +35,7 @@
 #include "asm.h"
-#if defined(__ARC601__) || !defined (__Xbarrel_shifter)
+#if defined(__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
 /* This code is optimized for the ARC601 pipeline without barrel shifter. */
 ENTRY (strlen)
@@ -160,6 +160,6 @@ ENTRY (strlen)
 	sub_s.ne r1,r1,r1
 #endif /* !SPECIAL_EARLY_END */
 ENDFUNC (strlen)
-#endif /* __ARC601__ || !__Xbarrel_shifter*/
+#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__*/
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strncpy-bs.S

@@ -45,9 +45,9 @@
 	there, but the it is not likely to be taken often, and it
 	would also be likey to cost an unaligned mispredict at the next call. */
-#if !defined (__ARC601__) && defined (__Xbarrel_shifter)
+#if !defined (__ARC601__) && defined (__ARC_BARREL_SHIFTER__)
-#if defined (__ARC700___) || defined (__EM__) || defined (__HS__)
+#if defined (__ARC700___) || defined (__ARCEM__) || defined (__ARCHS__)
 #define BRand(a,b,l) tst a,b ` bne_l l
 #else
 #define BRand(a,b,l) and a,a,b ` brne_s a,0,l
@@ -112,7 +112,7 @@ ENTRY (strncpy)
 .Lr4z:
 	mov_l	r3,r4
 .Lr3z:
-#if defined (__ARC700__) || defined (__EM__) || defined (__HS__)
+#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
 #ifdef __LITTLE_ENDIAN__
 	bmsk.f	r1,r3,7
 	lsr_s	r3,r3,8
@@ -166,6 +166,6 @@ ENTRY (strncpy)
 	j_s.d	[blink]
 	stb_l	r12,[r3]
 ENDFUNC (strncpy)
-#endif /* !__ARC601__ && __Xbarrel_shifter */
+#endif /* !__ARC601__ && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strncpy.S

@@ -40,7 +40,7 @@
 	it 8 byte aligned. Thus, we can do a little read-ahead, without
 	dereferencing a cache line that we should not touch. */
-#if defined (__ARC601__) || !defined (__Xbarrel_shifter)
+#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
 #define BRand(a,b,l) and a,a,b ` brne_s a,0,l
@@ -129,6 +129,6 @@ ENTRY (strncpy)
 	j_s.d	[blink]
 	stb_s	r12,[r3]
 ENDFUNC (strncpy)
-#endif /* __ARC601__ || !__Xbarrel_shifter */
+#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */