Define _STRING_ARCH_unaligned unconditionally
This patch defines _STRING_ARCH_unaligned to 0 in the default bits/string.h header to avoid compiler warnings about an undefined macro on platforms that do not define it. It also adjusts the code that previously tested whether the macro was defined so that it checks the macro's value instead.
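The conversion in every hunk below is mechanical: "#ifdef _STRING_ARCH_unaligned" becomes "#if _STRING_ARCH_unaligned" and "#ifndef" becomes "#if !", which is only safe once the macro is guaranteed to be defined. The following standalone C sketch (not part of the patch; the read32le helper and the locally supplied default are illustrative only) shows the resulting idiom:

/* Standalone sketch of the idiom this commit standardizes on.  In glibc
   the macro comes from <bits/string.h>; the default supplied here only
   makes this example compile on its own.  */
#include <stdint.h>
#include <stdio.h>

#ifndef _STRING_ARCH_unaligned
# define _STRING_ARCH_unaligned 0   /* same default the patch adds */
#endif

static uint32_t
read32le (const unsigned char *p)
{
#if _STRING_ARCH_unaligned   /* was: #ifdef _STRING_ARCH_unaligned */
  /* The architecture tolerates unaligned loads: read the word directly
     (this branch additionally assumes a little-endian host).  */
  return *(const uint32_t *) p;
#else
  /* Byte-wise little-endian read, safe for any alignment.  */
  return (uint32_t) p[0] | ((uint32_t) p[1] << 8)
	 | ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
#endif
}

int
main (void)
{
  unsigned char buf[] = { 0x78, 0x56, 0x34, 0x12 };
  printf ("%#x\n", read32le (buf));   /* prints 0x12345678 */
  return 0;
}

With the default of 0 the byte-wise branch is compiled; an architecture that defines the macro to 1 gets the direct load, and -Wundef can no longer fire because the #if operand is always defined.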
parent a88ddc902b
commit 27822ce67f
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,16 @@
+2014-04-09  Adhemerval Zanella  <azanella@linux.vnet.ibm.com>
+
+	* bits/string.h (_STRING_ARCH_unaligned): Define it to 0.
+	* crypt/sha256.c (_STRING_ARCH_unaligned): Check its value instead of its
+	definition.
+	* iconv/gconv_simple.c (_STRING_ARCH_unaligned): Likewise.
+	* iconv/loop.c (_STRING_ARCH_unaligned): Likewise.
+	* iconv/skeleton.c (_STRING_ARCH_unaligned): Likewise.
+	* nscd/nscd_gethst_r.c (_STRING_ARCH_unaligned): Likewise.
+	* nscd/nscd_getserv_r.c (_STRING_ARCH_unaligned): Likewise.
+	* nscd/nscd_helper.c (_STRING_ARCH_unaligned): Likewise.
+	* resolv/res_send.c (_STRING_ARCH_unaligned): Likewise.
+
 2014-04-09  Peter Brett  <peter@peter-b.co.uk>
 
 	[BZ #15514]
--- a/bits/string.h
+++ b/bits/string.h
@@ -8,5 +8,7 @@
 #ifndef _BITS_STRING_H
 #define _BITS_STRING_H 1
 
+/* Define if architecture can access unaligned multi-byte variables.  */
+#define _STRING_ARCH_unaligned 0
 
 #endif /* bits/string.h */
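The generic header above only provides the conservative default. An architecture whose hardware handles unaligned loads defines the same macro to 1 in its own bits/string.h (x86_64 did so at the time); a hypothetical override following the same layout as the generic header might look like:

/* Sketch of an architecture-specific bits/string.h override (illustrative
   only).  Defining the macro to 1 enables the unaligned fast paths guarded
   by #if _STRING_ARCH_unaligned below.  */
#ifndef _BITS_STRING_H
#define _BITS_STRING_H 1

/* This architecture can access unaligned multi-byte variables.  */
#define _STRING_ARCH_unaligned 1

#endif /* bits/string.h */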
--- a/crypt/sha256.c
+++ b/crypt/sha256.c
@@ -125,7 +125,7 @@ __sha256_finish_ctx (ctx, resbuf)
   memcpy (&ctx->buffer[bytes], fillbuf, pad);
 
   /* Put the 64-bit file length in *bits* at the end of the buffer.  */
-#ifdef _STRING_ARCH_unaligned
+#if _STRING_ARCH_unaligned
   ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
 #else
   ctx->buffer32[(bytes + pad + 4) / 4] = SWAP (ctx->total[TOTAL64_low] << 3);
--- a/iconv/gconv_simple.c
+++ b/iconv/gconv_simple.c
@@ -112,7 +112,7 @@ internal_ucs4_loop (struct __gconv_step *step,
   return result;
 }
 
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
 static inline int
 __attribute ((always_inline))
 internal_ucs4_loop_unaligned (struct __gconv_step *step,
@@ -289,7 +289,7 @@ ucs4_internal_loop (struct __gconv_step *step,
   return result;
 }
 
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
 static inline int
 __attribute ((always_inline))
 ucs4_internal_loop_unaligned (struct __gconv_step *step,
@@ -478,7 +478,7 @@ internal_ucs4le_loop (struct __gconv_step *step,
   return result;
 }
 
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
 static inline int
 __attribute ((always_inline))
 internal_ucs4le_loop_unaligned (struct __gconv_step *step,
@@ -658,7 +658,7 @@ ucs4le_internal_loop (struct __gconv_step *step,
   return result;
 }
 
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
 static inline int
 __attribute ((always_inline))
 ucs4le_internal_loop_unaligned (struct __gconv_step *step,
--- a/iconv/loop.c
+++ b/iconv/loop.c
@@ -63,7 +63,7 @@
    representations with a fixed width of 2 or 4 bytes.  But if we cannot
    access unaligned memory we still have to read byte-wise.  */
 #undef FCTNAME2
-#if defined _STRING_ARCH_unaligned || !defined DEFINE_UNALIGNED
+#if _STRING_ARCH_unaligned || !defined DEFINE_UNALIGNED
 /* We can handle unaligned memory access.  */
 # define get16(addr) *((const uint16_t *) (addr))
 # define get32(addr) *((const uint32_t *) (addr))
@@ -342,7 +342,7 @@ FCTNAME (LOOPFCT) (struct __gconv_step *step,
 
 /* Include the file a second time to define the function to handle
    unaligned access.  */
-#if !defined DEFINE_UNALIGNED && !defined _STRING_ARCH_unaligned \
+#if !defined DEFINE_UNALIGNED && !_STRING_ARCH_unaligned \
     && MIN_NEEDED_INPUT != 1 && MAX_NEEDED_INPUT % MIN_NEEDED_INPUT == 0 \
     && MIN_NEEDED_OUTPUT != 1 && MAX_NEEDED_OUTPUT % MIN_NEEDED_OUTPUT == 0
 # undef get16
--- a/iconv/skeleton.c
+++ b/iconv/skeleton.c
@@ -204,7 +204,7 @@
 /* Define macros which can access unaligned buffers.  These macros are
    supposed to be used only in code outside the inner loops.  For the inner
    loops we have other definitions which allow optimized access.  */
-#ifdef _STRING_ARCH_unaligned
+#if _STRING_ARCH_unaligned
 /* We can handle unaligned memory access.  */
 # define get16u(addr) *((const uint16_t *) (addr))
 # define get32u(addr) *((const uint32_t *) (addr))
@@ -523,7 +523,7 @@ FUNCTION_NAME (struct __gconv_step *step, struct __gconv_step_data *data,
    INTERNAL, for which the subexpression evaluates to 1, but INTERNAL
    buffers are always aligned correctly.  */
 #define POSSIBLY_UNALIGNED \
-  (!defined _STRING_ARCH_unaligned \
+  (!_STRING_ARCH_unaligned \
   && (((FROM_LOOP_MIN_NEEDED_FROM != 1 \
 	&& FROM_LOOP_MAX_NEEDED_FROM % FROM_LOOP_MIN_NEEDED_FROM == 0) \
        && (FROM_LOOP_MIN_NEEDED_TO != 1 \
--- a/nscd/nscd_gethst_r.c
+++ b/nscd/nscd_gethst_r.c
@@ -191,7 +191,7 @@ nscd_gethst_r (const char *key, size_t keylen, request_type type,
 	    goto out;
 	}
 
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
       /* The aliases_len array in the mapped database might very
 	 well be unaligned.  We will access it word-wise so on
 	 platforms which do not tolerate unaligned accesses we
@@ -141,7 +141,7 @@ nscd_getserv_r (const char *crit, size_t critlen, const char *proto,
 			      > recend, 0))
 	    goto out;
 
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
 	  /* The aliases_len array in the mapped database might very
 	     well be unaligned.  We will access it word-wise so on
 	     platforms which do not tolerate unaligned accesses we
--- a/nscd/nscd_helper.c
+++ b/nscd/nscd_helper.c
@@ -490,7 +490,7 @@ __nscd_cache_search (request_type type, const char *key, size_t keylen,
       struct hashentry *here = (struct hashentry *) (mapped->data + work);
       ref_t here_key, here_packet;
 
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
       /* Although during garbage collection when moving struct hashentry
 	 records around we first copy from old to new location and then
 	 adjust pointer from previous hashentry to it, there is no barrier
@@ -512,7 +512,7 @@ __nscd_cache_search (request_type type, const char *key, size_t keylen,
 	  struct datahead *dh
 	    = (struct datahead *) (mapped->data + here_packet);
 
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
 	  if ((uintptr_t) dh & (__alignof__ (*dh) - 1))
 	    return NULL;
 #endif
@@ -536,7 +536,7 @@ __nscd_cache_search (request_type type, const char *key, size_t keylen,
 	      struct hashentry *trailelem;
 	      trailelem = (struct hashentry *) (mapped->data + trail);
 
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
 	      /* We have to redo the checks.  Maybe the data changed.  */
 	      if ((uintptr_t) trailelem & (__alignof__ (*trailelem) - 1))
 		return NULL;
--- a/resolv/res_send.c
+++ b/resolv/res_send.c
@@ -787,7 +787,7 @@ send_vc(res_state statp,
 				/* No buffer allocated for the first
 				   reply.  We can try to use the rest
 				   of the user-provided buffer.  */
-#ifdef _STRING_ARCH_unaligned
+#if _STRING_ARCH_unaligned
 				*anssizp2 = orig_anssizp - resplen;
 				*ansp2 = *ansp + resplen;
 #else
@@ -1205,7 +1205,7 @@ send_dg(res_state statp,
 			/* No buffer allocated for the first
 			   reply.  We can try to use the rest
 			   of the user-provided buffer.  */
-#ifdef _STRING_ARCH_unaligned
+#if _STRING_ARCH_unaligned
 			*anssizp2 = orig_anssizp - resplen;
 			*ansp2 = *ansp + resplen;
 #else