author     Stephan Bergmann <sbergman@redhat.com>   2016-07-04 15:47:08 +0200
committer  Stephan Bergmann <sbergman@redhat.com>   2016-07-04 15:47:08 +0200
commit     3e27c437709e8f1e0df78826452f29330c68ebb5 (patch)
tree       52d3caa2b8acac5a6908270f4ebaaef13e950bfe /external/nss
parent     321b8ff86ba623b92aab5608eb94385e56823b65 (diff)
Avoid -fsanitize=alignment in external/nss
...in a way that might be a performance issue, so include the patch only conditionally.

Change-Id: I483a77d0d199c2dee00139fdd4fa41223192289c
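
For background, the check that -fsanitize=alignment performs, and the byte-wise load the patch switches to, can be illustrated with a minimal standalone sketch. This is not NSS code: the names load_be32_cast and load_be32_bytes are made up for this example, and uint32_t stands in for PRUint32.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pattern UBSan flags: reinterpret an arbitrary byte pointer as a
   32-bit word pointer and read through it.  x86 hardware tolerates
   the misaligned load, but it is undefined behaviour in C. */
static uint32_t load_be32_cast(const unsigned char *p) {
    uint32_t w = *(const uint32_t *)p;            /* possibly misaligned */
    return (w << 24) | ((w << 8) & 0x00ff0000u)   /* byte-swap, assuming */
         | ((w >> 8) & 0x0000ff00u) | (w >> 24);  /* a little-endian host */
}

/* Alignment-safe pattern, analogous to the patched LOAD(n) macro below:
   assemble the word from individual bytes. */
static uint32_t load_be32_bytes(const unsigned char *p) {
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void) {
    _Alignas(uint32_t) unsigned char buf[8] = { 0, 0x12, 0x34, 0x56, 0x78, 0, 0, 0 };
    /* buf + 1 is guaranteed to be misaligned for uint32_t. */
    printf("%08" PRIx32 " %08" PRIx32 "\n",
           load_be32_cast(buf + 1), load_be32_bytes(buf + 1));
    return 0;
}

On little-endian x86 both calls print 12345678, but only the second is well-defined; trading one word load for four byte loads is also why the commit message treats the patch as a possible performance issue and applies it only to sanitizer builds (COM_IS_CLANG and a -fsanitize flag in $(CC), per the makefile hunk below).
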
Diffstat (limited to 'external/nss')
-rw-r--r--  external/nss/UnpackedTarball_nss.mk    1
-rw-r--r--  external/nss/ubsan-alignment.patch.0   40
2 files changed, 41 insertions, 0 deletions
diff --git a/external/nss/UnpackedTarball_nss.mk b/external/nss/UnpackedTarball_nss.mk
index 1a7ed1373230..e47b2419625e 100644
--- a/external/nss/UnpackedTarball_nss.mk
+++ b/external/nss/UnpackedTarball_nss.mk
@@ -43,6 +43,7 @@ ifeq ($(COM_IS_CLANG),TRUE)
ifneq ($(filter -fsanitize=%,$(CC)),)
$(eval $(call gb_UnpackedTarball_add_patches,nss,\
external/nss/asan.patch.1 \
+ external/nss/ubsan-alignment.patch.0 \
))
endif
endif
diff --git a/external/nss/ubsan-alignment.patch.0 b/external/nss/ubsan-alignment.patch.0
new file mode 100644
index 000000000000..4d13ffcda730
--- /dev/null
+++ b/external/nss/ubsan-alignment.patch.0
@@ -0,0 +1,40 @@
+--- nss/lib/freebl/md5.c
++++ nss/lib/freebl/md5.c
+@@ -443,7 +443,7 @@
+ /* Iterate over 64-byte chunks of the message. */
+ while (inputLen >= MD5_BUFFER_SIZE) {
+ #ifdef IS_LITTLE_ENDIAN
+-#ifdef NSS_X86_OR_X64
++#if 0
+ /* x86 can handle arithmetic on non-word-aligned buffers */
+ wBuf = (PRUint32 *)input;
+ #else
+--- nss/lib/freebl/sha_fast.c
++++ nss/lib/freebl/sha_fast.c
+@@ -16,7 +16,7 @@
+ #include "ssltrace.h"
+ #endif
+
+-static void shaCompress(volatile SHA_HW_t *X, const PRUint32 * datain);
++static void shaCompress(volatile SHA_HW_t *X, const unsigned char * datain);
+
+ #define W u.w
+ #define B u.b
+@@ -243,7 +243,7 @@
+ * code on AMD64.
+ */
+ static void
+-shaCompress(volatile SHA_HW_t *X, const PRUint32 *inbuf)
++shaCompress(volatile SHA_HW_t *X, const unsigned char *inbuf)
+ {
+ register SHA_HW_t A, B, C, D, E;
+
+@@ -275,7 +275,7 @@
+ #define SHA_RND4(a,b,c,d,e,n) \
+ a = SHA_ROTL(b,5)+SHA_F4(c,d,e)+a+XW(n)+K3; c=SHA_ROTL(c,30)
+
+-#define LOAD(n) XW(n) = SHA_HTONL(inbuf[n])
++#define LOAD(n) XW(n) = (((PRUint32)inbuf[4*n])<<24)|(((PRUint32)inbuf[4*n+1])<<16)|(((PRUint32)inbuf[4*n+2])<<8)|((PRUint32)inbuf[4*n+3])
+
+ A = XH(0);
+ B = XH(1);
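
To make the sha_fast.c change concrete, here is a small hypothetical sketch of how the rewritten LOAD(n) behaves; LOAD_BE32 is a stand-in for the patched macro (uint32_t instead of PRUint32, a plain array instead of the SHA_HW_t state), not the actual NSS definition.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the patched LOAD(n): build the n-th big-endian word of a
   block from individual bytes, so the input is never read as PRUint32. */
#define LOAD_BE32(buf, n) \
    (((uint32_t)(buf)[4*(n)]     << 24) | ((uint32_t)(buf)[4*(n) + 1] << 16) | \
     ((uint32_t)(buf)[4*(n) + 2] <<  8) |  (uint32_t)(buf)[4*(n) + 3])

int main(void) {
    /* First eight bytes of a message block; no alignment requirement. */
    const unsigned char block[] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' };
    /* Prints 61626364 and 65666768, the same network-order words that
       SHA_HTONL(inbuf[n]) produced, but without a word-sized load. */
    printf("W[0] = %08" PRIx32 "\nW[1] = %08" PRIx32 "\n",
           LOAD_BE32(block, 0), LOAD_BE32(block, 1));
    return 0;
}

Changing the shaCompress parameter to const unsigned char * simply matches this byte-oriented access; the md5.c hunk takes the shorter route of turning off the x86 fast path (#if 0) so the pre-existing fallback branch is compiled instead of the (PRUint32 *)input cast.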