[PATCHv3] SSE2/SSSE3/AVX2 optimized version of get_checksum1() for x86-64

Jorrit Jongma jorrit.jongma+rsync at gmail.com
Fri May 22 14:49:41 UTC 2020


Here's the third (and final, barring bugs) version, which builds _on
top_ of the patch already committed by Wayne.

This version also adds AVX2 support and rearranges defines and
filenames in a way that seems more logical and future-proof to me.

Real-world tests show about an 8% reduction in network transfer time
for large files that exist on both ends but are slightly modified,
with the receiving end being a slow CPU.
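
For anyone wanting to reproduce the raw MB/s numbers quoted in the
file header below, a simple throughput harness is enough. This is
only a sketch: it assumes you link it against an rsync build that
contains this patch (so get_checksum1() is the patched, dispatching
symbol); the buffer size and iteration count are arbitrary.

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <vector>

// Exported by rsync's checksum code; rsync's uint32/int32 are 32-bit too.
extern "C" uint32_t get_checksum1(char *buf1, int32_t len);

int main() {
    const int32_t len = 1 << 20;   // 1 MiB per call
    const int iters = 4096;        // ~4 GiB total
    std::vector<char> buf(len);
    for (int32_t i = 0; i < len; i++)
        buf[i] = (char)(i * 31 + 7);   // arbitrary non-constant data

    uint32_t sink = 0;   // keeps the calls from being optimized away
    auto t0 = std::chrono::steady_clock::now();
    for (int n = 0; n < iters; n++)
        sink ^= get_checksum1(buf.data(), len);
    auto t1 = std::chrono::steady_clock::now();

    double secs = std::chrono::duration<double>(t1 - t0).count();
    printf("%.0f MB/s (sink=%08x)\n", (double)len * iters / 1e6 / secs, sink);
    return 0;
}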


@ GitHub:

https://github.com/Chainfire/rsync/commit/9f888c8721342c9a02f46029fc257892b91e642f

https://github.com/Chainfire/rsync/commit/9f888c8721342c9a02f46029fc257892b91e642f.patch
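
For those wondering how a single binary ends up picking between the
C, SSE2, SSSE3 and AVX2 code paths at runtime: the patch relies on
GCC's function multi-versioning via __attribute__((target(...))),
which is also why the file has to be built with g++ rather than gcc.
A toy illustration of the mechanism (not rsync code):

#include <cstdio>

// GCC emits every version plus an ifunc resolver that picks the best
// match for the running CPU; the "default" version is the fallback.
__attribute__ ((target("default"))) static const char *variant() { return "generic"; }
__attribute__ ((target("ssse3")))   static const char *variant() { return "ssse3"; }
__attribute__ ((target("avx2")))    static const char *variant() { return "avx2"; }

int main() {
    printf("selected variant: %s\n", variant());
    return 0;
}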

--

From 9f888c8721342c9a02f46029fc257892b91e642f Mon Sep 17 00:00:00 2001
From: Jorrit Jongma <git at jongma.org>
Date: Fri, 22 May 2020 13:03:55 +0200
Subject: [PATCH] AVX2 optimized version of get_checksum1() for x86-64

Additionally restructures build switches and defines from SSE2 to SIMD,
to allow potential reuse should patches with SIMD instructions for
other processor architectures become available.
---
 Makefile.in              |   9 +-
 checksum.c               |   2 +-
 checksum_simd_x86_64.cpp | 414 +++++++++++++++++++++++++++++++++++++++
 checksum_sse2.cpp        | 289 ---------------------------
 configure.ac             |  29 ++-
 options.c                |  10 +-
 6 files changed, 446 insertions(+), 307 deletions(-)
 create mode 100644 checksum_simd_x86_64.cpp
 delete mode 100644 checksum_sse2.cpp

diff --git a/Makefile.in b/Makefile.in
index 30869294..af5aaa56 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -17,6 +17,7 @@ CXXFLAGS=@CXXFLAGS@
 EXEEXT=@EXEEXT@
 LDFLAGS=@LDFLAGS@
 LIBOBJDIR=lib/
+SIMD=@SIMD@

 INSTALLCMD=@INSTALL@
 INSTALLMAN=@INSTALL@
@@ -31,6 +32,11 @@ VERSION=@RSYNC_VERSION@
 .SUFFIXES:
 .SUFFIXES: .c .o

+CXXOBJ=
+ifeq ($(SIMD),x86-64)
+    CXXOBJ=checksum_simd_x86_64.o
+endif
+
 GENFILES=configure.sh aclocal.m4 config.h.in proto.h proto.h-tstamp rsync.1 rsync-ssl.1 rsyncd.conf.5
 HEADERS=byteorder.h config.h errcode.h proto.h rsync.h ifuncs.h itypes.h inums.h \
  lib/pool_alloc.h
@@ -43,7 +49,6 @@ OBJS1=flist.o rsync.o generator.o receiver.o cleanup.o sender.o exclude.o \
 OBJS2=options.o io.o compat.o hlink.o token.o uidlist.o socket.o hashtable.o \
  fileio.o batch.o clientname.o chmod.o acls.o xattrs.o
 OBJS3=progress.o pipe.o
-CXXOBJ=@CXXOBJ@
 DAEMON_OBJ = params.o loadparm.o clientserver.o access.o connection.o authenticate.o
 popt_OBJS=popt/findme.o  popt/popt.o  popt/poptconfig.o \
  popt/popthelp.o popt/poptparse.o
@@ -118,7 +123,7 @@ rounding.h: rounding.c rsync.h proto.h
  fi
  @rm -f rounding.out

-checksum_sse2.o: checksum_sse2.cpp
+checksum_simd_x86_64.o: checksum_simd_x86_64.cpp
  $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c -o $@ $<

 tls$(EXEEXT): $(TLS_OBJ)
diff --git a/checksum.c b/checksum.c
index 8698543d..5954a872 100644
--- a/checksum.c
+++ b/checksum.c
@@ -99,7 +99,7 @@ int canonical_checksum(int csum_type)
  return csum_type >= CSUM_MD4 ? 1 : 0;
 }

-#ifndef ENABLE_SSE2 /* See checksum_sse2.cpp for the SSE2 version. */
+#ifndef HAVE_SIMD // see checksum_simd_*.cpp
 /*
   a simple 32 bit checksum that can be updated from either end
   (inspired by Mark Adler's Adler-32 checksum)
diff --git a/checksum_simd_x86_64.cpp b/checksum_simd_x86_64.cpp
new file mode 100644
index 00000000..cfbc8adf
--- /dev/null
+++ b/checksum_simd_x86_64.cpp
@@ -0,0 +1,414 @@
+/*
+ * SSE2/SSSE3/AVX2-optimized routines to support checksumming of bytes.
+ *
+ * Copyright (C) 1996 Andrew Tridgell
+ * Copyright (C) 1996 Paul Mackerras
+ * Copyright (C) 2004-2020 Wayne Davison
+ * Copyright (C) 2020 Jorrit Jongma
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, visit the http://fsf.org website.
+ */
+/*
+ * Optimization target for get_checksum1() was the Intel Atom D2700, the
+ * slowest CPU in the test set and the most likely to be CPU limited during
+ * transfers. The combination of intrinsics was chosen specifically for the
+ * most gain on that CPU, other combinations were occasionally slightly
+ * faster on the others.
+ *
+ * While on more modern CPUs transfers are less likely to be CPU limited
+ * (at least by this specific function), lower CPU usage is always better.
+ * Improvements may still be seen when matching chunks from NVMe storage
+ * even on newer CPUs.
+ *
+ * Benchmarks (in MB/s)            C    SSE2   SSSE3    AVX2
+ * - Intel Atom D2700            550     750    1000     N/A
+ * - Intel i7-7700hq            1850    2550    4050    6200
+ * - AMD ThreadRipper 2950x     2900    5600    8950    8100
+ *
+ * Curiously the AMD is slower with AVX2 than SSSE3, while the Intel is
+ * significantly faster. AVX2 is kept because it's more likely to relieve
+ * the bottleneck on the slower CPU.
+ *
+ * This optimization for get_checksum1() is intentionally limited to x86-64
+ * as no 32-bit CPU was available for testing. As 32-bit CPUs only have half
+ * the available xmm registers, this optimized version may not be faster than
+ * the pure C version anyway. Note that all x86-64 CPUs support at least SSE2.
+ *
+ * This file is compiled using GCC 4.8+'s C++ front end to allow the use of
+ * the target attribute, selecting the fastest code path based on runtime
+ * detection of CPU capabilities.
+ */
+
+#ifdef __x86_64__
+#ifdef __cplusplus
+
+#include "rsync.h"
+
+#ifdef HAVE_SIMD
+
+#include <immintrin.h>
+
+/* Compatibility functions to let our SSSE3 algorithm run on SSE2 */
+
+__attribute__ ((target("sse2"))) static inline __m128i
sse_interleave_odd_epi16(__m128i a, __m128i b) {
+    return _mm_packs_epi32(
+        _mm_srai_epi32(a, 16),
+        _mm_srai_epi32(b, 16)
+    );
+}
+
+__attribute__ ((target("sse2"))) static inline __m128i
sse_interleave_even_epi16(__m128i a, __m128i b) {
+    return sse_interleave_odd_epi16(
+        _mm_slli_si128(a, 2),
+        _mm_slli_si128(b, 2)
+    );
+}
+
+__attribute__ ((target("sse2"))) static inline __m128i
sse_mulu_odd_epi8(__m128i a, __m128i b) {
+    return _mm_mullo_epi16(
+        _mm_srli_epi16(a, 8),
+        _mm_srai_epi16(b, 8)
+    );
+}
+
+__attribute__ ((target("sse2"))) static inline __m128i
sse_mulu_even_epi8(__m128i a, __m128i b) {
+    return _mm_mullo_epi16(
+        _mm_and_si128(a, _mm_set1_epi16(0xFF)),
+        _mm_srai_epi16(_mm_slli_si128(b, 1), 8)
+    );
+}
+
+__attribute__ ((target("sse2"))) static inline __m128i
sse_hadds_epi16(__m128i a, __m128i b) {
+    return _mm_adds_epi16(
+        sse_interleave_even_epi16(a, b),
+        sse_interleave_odd_epi16(a, b)
+    );
+}
+
+__attribute__ ((target("ssse3"))) static inline __m128i
sse_hadds_epi16(__m128i a, __m128i b) {
+    return _mm_hadds_epi16(a, b);
+}
+
+__attribute__ ((target("sse2"))) static inline __m128i
sse_maddubs_epi16(__m128i a, __m128i b) {
+    return _mm_adds_epi16(
+        sse_mulu_even_epi8(a, b),
+        sse_mulu_odd_epi8(a, b)
+    );
+}
+
+__attribute__ ((target("ssse3"))) static inline __m128i
sse_maddubs_epi16(__m128i a, __m128i b) {
+    return _mm_maddubs_epi16(a, b);
+}
+
+__attribute__ ((target("default"))) static inline __m128i
sse_interleave_odd_epi16(__m128i a, __m128i b) { }
+__attribute__ ((target("default"))) static inline __m128i
sse_interleave_even_epi16(__m128i a, __m128i b) { }
+__attribute__ ((target("default"))) static inline __m128i
sse_mulu_odd_epi8(__m128i a, __m128i b) { }
+__attribute__ ((target("default"))) static inline __m128i
sse_mulu_even_epi8(__m128i a, __m128i b) { }
+__attribute__ ((target("default"))) static inline __m128i
sse_hadds_epi16(__m128i a, __m128i b) { }
+__attribute__ ((target("default"))) static inline __m128i
sse_maddubs_epi16(__m128i a, __m128i b) { }
+
+/*
+  Original loop per 4 bytes:
+    s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] + 10*CHAR_OFFSET;
+    s1 += buf[i] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET;
+
+  SSE2/SSSE3 loop per 32 bytes:
+    int16 t1[8];
+    int16 t2[8];
+    for (int j = 0; j < 8; j++) {
+      t1[j] = buf[j*4 + i] + buf[j*4 + i+1] + buf[j*4 + i+2] + buf[j*4 + i+3];
+      t2[j] = 4*buf[j*4 + i] + 3*buf[j*4 + i+1] + 2*buf[j*4 + i+2] + buf[j*4 + i+3];
+    }
+    s2 += 32*s1 + (uint32)(
+              28*t1[0] + 24*t1[1] + 20*t1[2] + 16*t1[3] + 12*t1[4] + 8*t1[5] + 4*t1[6] +
+              t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7]
+          ) + 528*CHAR_OFFSET;
+    s1 += (uint32)(t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7]) +
+          32*CHAR_OFFSET;
+ */
+/*
+  Both sse2 and ssse3 targets must be specified here or we lose (a lot of)
+  performance, possibly due to not unrolling+inlining the called targeted
+  functions.
+ */
+__attribute__ ((target("sse2", "ssse3"))) static int32 get_checksum1_sse2_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2) {
+    if (len > 32) {
+        int aligned = ((uintptr_t)buf & 15) == 0;
+
+        uint32 x[4] = {0};
+        x[0] = *ps1;
+        __m128i ss1 = _mm_loadu_si128((__m128i_u*)x);
+        x[0] = *ps2;
+        __m128i ss2 = _mm_loadu_si128((__m128i_u*)x);
+
+        const int16 mul_t1_buf[8] = {28, 24, 20, 16, 12, 8, 4, 0};
+        __m128i mul_t1 = _mm_loadu_si128((__m128i_u*)mul_t1_buf);
+
+        for (; i < (len-32); i+=32) {
+            // Load ... 2*[int8*16]
+            // SSSE3 has _mm_lddqu_si128, but using it would require separate
+            // target functions for the SSE2 and SSSE3 loads. For reasons
+            // unknown (to me) we lose about 10% performance on some CPUs if
+            // we do that right here. We just use _mm_loadu_si128, which is
+            // synonymous on all but a handful of old CPUs, and take the
+            // 1-5% hit on those specific CPUs where it isn't.
+            __m128i in8_1, in8_2;
+            if (!aligned) {
+                in8_1 = _mm_loadu_si128((__m128i_u*)&buf[i]);
+                in8_2 = _mm_loadu_si128((__m128i_u*)&buf[i + 16]);
+            } else {
+                in8_1 = _mm_load_si128((__m128i_u*)&buf[i]);
+                in8_2 = _mm_load_si128((__m128i_u*)&buf[i + 16]);
+            }
+
+            // (1*buf[i] + 1*buf[i+1]), (1*buf[i+2] + 1*buf[i+3]), ... 2*[int16*8]
+            // Fastest, even though multiply by 1
+            __m128i mul_one = _mm_set1_epi8(1);
+            __m128i add16_1 = sse_maddubs_epi16(mul_one, in8_1);
+            __m128i add16_2 = sse_maddubs_epi16(mul_one, in8_2);
+
+            // (4*buf[i] + 3*buf[i+1]), (2*buf[i+2] + buf[i+3]), ... 2*[int16*8]
+            __m128i mul_const = _mm_set1_epi32(4 + (3 << 8) + (2 << 16) + (1 << 24));
+            __m128i mul_add16_1 = sse_maddubs_epi16(mul_const, in8_1);
+            __m128i mul_add16_2 = sse_maddubs_epi16(mul_const, in8_2);
+
+            // s2 += 32*s1
+            ss2 = _mm_add_epi32(ss2, _mm_slli_epi32(ss1, 5));
+
+            // [sum(t1[0]..t1[7]), X, X, X] [int32*4]; faster than multiple _mm_hadds_epi16
+            // Shifting left, then shifting right again and shuffling (rather than just
+            // shifting right as with mul32 below) to cheaply end up with the correct sign
+            // extension as we go from int16 to int32.
+            __m128i sum_add32 = _mm_add_epi16(add16_1, add16_2);
+            sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 2));
+            sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 4));
+            sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 8));
+            sum_add32 = _mm_srai_epi32(sum_add32, 16);
+            sum_add32 = _mm_shuffle_epi32(sum_add32, 3);
+
+            // [sum(t2[0]..t2[7]), X, X, X] [int32*4]; faster than multiple _mm_hadds_epi16
+            __m128i sum_mul_add32 = _mm_add_epi16(mul_add16_1, mul_add16_2);
+            sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 2));
+            sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 4));
+            sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 8));
+            sum_mul_add32 = _mm_srai_epi32(sum_mul_add32, 16);
+            sum_mul_add32 = _mm_shuffle_epi32(sum_mul_add32, 3);
+
+            // s1 += t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7]
+            ss1 = _mm_add_epi32(ss1, sum_add32);
+
+            // s2 += t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7]
+            ss2 = _mm_add_epi32(ss2, sum_mul_add32);
+
+            // [t1[0] + t1[1], t1[2] + t1[3] ...] [int16*8]
+            // We could've combined this with generating sum_add32 above and
+            // save an instruction but benchmarking shows that as being slower
+            __m128i add16 = sse_hadds_epi16(add16_1, add16_2);
+
+            // [t1[0], t1[1], ...] -> [t1[0]*28 + t1[1]*24, ...] [int32*4]
+            __m128i mul32 = _mm_madd_epi16(add16, mul_t1);
+
+            // [sum(mul32), X, X, X] [int32*4]; faster than multiple _mm_hadd_epi32
+            mul32 = _mm_add_epi32(mul32, _mm_srli_si128(mul32, 4));
+            mul32 = _mm_add_epi32(mul32, _mm_srli_si128(mul32, 8));
+
+            // s2 += 28*t1[0] + 24*t1[1] + 20*t1[2] + 16*t1[3] + 12*t1[4] + 8*t1[5] + 4*t1[6]
+            ss2 = _mm_add_epi32(ss2, mul32);
+
+#if CHAR_OFFSET != 0
+            // s1 += 32*CHAR_OFFSET
+            __m128i char_offset_multiplier = _mm_set1_epi32(32 * CHAR_OFFSET);
+            ss1 = _mm_add_epi32(ss1, char_offset_multiplier);
+
+            // s2 += 528*CHAR_OFFSET
+            char_offset_multiplier = _mm_set1_epi32(528 * CHAR_OFFSET);
+            ss2 = _mm_add_epi32(ss2, char_offset_multiplier);
+#endif
+        }
+
+        _mm_store_si128((__m128i_u*)x, ss1);
+        *ps1 = x[0];
+        _mm_store_si128((__m128i_u*)x, ss2);
+        *ps2 = x[0];
+    }
+    return i;
+}
+
+/*
+  AVX2 loop per 64 bytes:
+    int16 t1[16];
+    int16 t2[16];
+    for (int j = 0; j < 16; j++) {
+      t1[j] = buf[j*4 + i] + buf[j*4 + i+1] + buf[j*4 + i+2] + buf[j*4 + i+3];
+      t2[j] = 4*buf[j*4 + i] + 3*buf[j*4 + i+1] + 2*buf[j*4 + i+2] + buf[j*4 + i+3];
+    }
+    s2 += 64*s1 + (uint32)(
+              60*t1[0] + 56*t1[1] + 52*t1[2] + 48*t1[3] + 44*t1[4] + 40*t1[5] + 36*t1[6] + 32*t1[7] + 28*t1[8] + 24*t1[9] + 20*t1[10] + 16*t1[11] + 12*t1[12] + 8*t1[13] + 4*t1[14] +
+              t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7] + t2[8] + t2[9] + t2[10] + t2[11] + t2[12] + t2[13] + t2[14] + t2[15]
+          ) + 2080*CHAR_OFFSET;
+    s1 += (uint32)(t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7] + t1[8] + t1[9] + t1[10] + t1[11] + t1[12] + t1[13] + t1[14] + t1[15]) +
+          64*CHAR_OFFSET;
+ */
+__attribute__ ((target("avx2"))) static int32
get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1,
uint32* ps2) {
+    if (len > 64) {
+        // Instructions reshuffled compared to SSE2 for slightly better performance
+        int aligned = ((uintptr_t)buf & 31) == 0;
+
+        uint32 x[8] = {0};
+        x[0] = *ps1;
+        __m256i ss1 = _mm256_lddqu_si256((__m256i_u*)x);
+        x[0] = *ps2;
+        __m256i ss2 = _mm256_lddqu_si256((__m256i_u*)x);
+
+        // The order gets shuffled compared to SSE2
+        const int16 mul_t1_buf[16] = {60, 56, 52, 48, 28, 24, 20, 16, 44, 40, 36, 32, 12, 8, 4, 0};
+        __m256i mul_t1 = _mm256_lddqu_si256((__m256i_u*)mul_t1_buf);
+
+        for (; i < (len-64); i+=64) {
+            // Load ... 2*[int8*32]
+            __m256i in8_1, in8_2;
+            if (!aligned) {
+                in8_1 = _mm256_lddqu_si256((__m256i_u*)&buf[i]);
+                in8_2 = _mm256_lddqu_si256((__m256i_u*)&buf[i + 32]);
+            } else {
+                in8_1 = _mm256_load_si256((__m256i_u*)&buf[i]);
+                in8_2 = _mm256_load_si256((__m256i_u*)&buf[i + 32]);
+            }
+
+            // Prefetch for next loops. This has no observable effect on the
+            // tested AMD but makes as much as 20% difference on the Intel.
+            // Curiously that same Intel sees no benefit from this with SSE2
+            // or SSSE3.
+            _mm_prefetch(&buf[i + 64], _MM_HINT_T0);
+            _mm_prefetch(&buf[i + 96], _MM_HINT_T0);
+            _mm_prefetch(&buf[i + 128], _MM_HINT_T0);
+            _mm_prefetch(&buf[i + 160], _MM_HINT_T0);
+
+            // (1*buf[i] + 1*buf[i+1]), (1*buf[i+2] + 1*buf[i+3]), ... 2*[int16*16]
+            // Fastest, even though multiply by 1
+            __m256i mul_one = _mm256_set1_epi8(1);
+            __m256i add16_1 = _mm256_maddubs_epi16(mul_one, in8_1);
+            __m256i add16_2 = _mm256_maddubs_epi16(mul_one, in8_2);
+
+            // (4*buf[i] + 3*buf[i+1]), (2*buf[i+2] + buf[i+3]), ... 2*[int16*16]
+            __m256i mul_const = _mm256_set1_epi32(4 + (3 << 8) + (2 << 16) + (1 << 24));
+            __m256i mul_add16_1 = _mm256_maddubs_epi16(mul_const, in8_1);
+            __m256i mul_add16_2 = _mm256_maddubs_epi16(mul_const, in8_2);
+
+            // s2 += 64*s1
+            ss2 = _mm256_add_epi32(ss2, _mm256_slli_epi32(ss1, 6));
+
+            // [t1[0] + t1[1], t1[2] + t1[3] ...] [int16*16]
+            __m256i add16 = _mm256_hadds_epi16(add16_1, add16_2);
+
+            // [t1[0], t1[1], ...] -> [t1[0]*60 + t1[1]*56, ...] [int32*8]
+            __m256i mul32 = _mm256_madd_epi16(add16, mul_t1);
+
+            // [sum(t1[0]..t1[15]), X, X, X, X, X, X, X] [int32*8]
+            __m256i sum_add32 = _mm256_add_epi16(add16_1, add16_2);
+            sum_add32 = _mm256_add_epi16(sum_add32, _mm256_permute4x64_epi64(sum_add32, 2 + (3 << 2) + (0 << 4) + (1 << 6)));
+            sum_add32 = _mm256_add_epi16(sum_add32, _mm256_slli_si256(sum_add32, 2));
+            sum_add32 = _mm256_add_epi16(sum_add32, _mm256_slli_si256(sum_add32, 4));
+            sum_add32 = _mm256_add_epi16(sum_add32, _mm256_slli_si256(sum_add32, 8));
+            sum_add32 = _mm256_srai_epi32(sum_add32, 16);
+            sum_add32 = _mm256_shuffle_epi32(sum_add32, 3);
+
+            // s1 += t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7] + t1[8] + t1[9] + t1[10] + t1[11] + t1[12] + t1[13] + t1[14] + t1[15]
+            ss1 = _mm256_add_epi32(ss1, sum_add32);
+
+            // [sum(t2[0]..t2[15]), X, X, X, X, X, X, X] [int32*8]
+            __m256i sum_mul_add32 = _mm256_add_epi16(mul_add16_1, mul_add16_2);
+            sum_mul_add32 = _mm256_add_epi16(sum_mul_add32, _mm256_permute4x64_epi64(sum_mul_add32, 2 + (3 << 2) + (0 << 4) + (1 << 6)));
+            sum_mul_add32 = _mm256_add_epi16(sum_mul_add32, _mm256_slli_si256(sum_mul_add32, 2));
+            sum_mul_add32 = _mm256_add_epi16(sum_mul_add32, _mm256_slli_si256(sum_mul_add32, 4));
+            sum_mul_add32 = _mm256_add_epi16(sum_mul_add32, _mm256_slli_si256(sum_mul_add32, 8));
+            sum_mul_add32 = _mm256_srai_epi32(sum_mul_add32, 16);
+            sum_mul_add32 = _mm256_shuffle_epi32(sum_mul_add32, 3);
+
+            // s2 += t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7] + t2[8] + t2[9] + t2[10] + t2[11] + t2[12] + t2[13] + t2[14] + t2[15]
+            ss2 = _mm256_add_epi32(ss2, sum_mul_add32);
+
+            // [sum(mul32), X, X, X, X, X, X, X] [int32*8]
+            mul32 = _mm256_add_epi32(mul32, _mm256_permute2x128_si256(mul32, mul32, 1));
+            mul32 = _mm256_add_epi32(mul32, _mm256_srli_si256(mul32, 4));
+            mul32 = _mm256_add_epi32(mul32, _mm256_srli_si256(mul32, 8));
+
+            // s2 += 60*t1[0] + 56*t1[1] + 52*t1[2] + 48*t1[3] + 44*t1[4] + 40*t1[5] + 36*t1[6] + 32*t1[7] + 28*t1[8] + 24*t1[9] + 20*t1[10] + 16*t1[11] + 12*t1[12] + 8*t1[13] + 4*t1[14]
+            ss2 = _mm256_add_epi32(ss2, mul32);
+
+#if CHAR_OFFSET != 0
+            // s1 += 64*CHAR_OFFSET
+            __m256i char_offset_multiplier = _mm256_set1_epi32(64 * CHAR_OFFSET);
+            ss1 = _mm256_add_epi32(ss1, char_offset_multiplier);
+
+            // s2 += 2080*CHAR_OFFSET
+            char_offset_multiplier = _mm256_set1_epi32(2080 * CHAR_OFFSET);
+            ss2 = _mm256_add_epi32(ss2, char_offset_multiplier);
+#endif
+        }
+
+        _mm256_store_si256((__m256i_u*)x, ss1);
+        *ps1 = x[0];
+        _mm256_store_si256((__m256i_u*)x, ss2);
+        *ps2 = x[0];
+    }
+    return i;
+}
+
+__attribute__ ((target("default"))) static int32
get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1,
uint32* ps2) {
+    return i;
+}
+
+__attribute__ ((target("default"))) static int32
get_checksum1_sse2_32(schar* buf, int32 len, int32 i, uint32* ps1,
uint32* ps2) {
+    return i;
+}
+
+static inline int32 get_checksum1_default_1(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2) {
+    uint32 s1 = *ps1;
+    uint32 s2 = *ps2;
+    for (; i < (len-4); i+=4) {
+        s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] + 10*CHAR_OFFSET;
+        s1 += (buf[i+0] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET);
+    }
+    for (; i < len; i++) {
+        s1 += (buf[i]+CHAR_OFFSET); s2 += s1;
+    }
+    *ps1 = s1;
+    *ps2 = s2;
+    return i;
+}
+
+extern "C" {
+
+uint32 get_checksum1(char *buf1, int32 len) {
+    int32 i = 0;
+    uint32 s1 = 0;
+    uint32 s2 = 0;
+
+    // multiples of 64 bytes using AVX2 (if available)
+    i = get_checksum1_avx2_64((schar*)buf1, len, i, &s1, &s2);
+
+    // multiples of 32 bytes using SSE2/SSSE3 (if available)
+    i = get_checksum1_sse2_32((schar*)buf1, len, i, &s1, &s2);
+
+    // whatever is left
+    i = get_checksum1_default_1((schar*)buf1, len, i, &s1, &s2);
+
+    return (s1 & 0xffff) + (s2 << 16);
+}
+
+}
+#endif /* HAVE_SIMD */
+#endif /* __cplusplus */
+#endif /* __x86_64__ */
diff --git a/checksum_sse2.cpp b/checksum_sse2.cpp
deleted file mode 100644
index 515596f0..00000000
--- a/checksum_sse2.cpp
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * SSE2/SSSE3-optimized routines to support checksumming of bytes.
- *
- * Copyright (C) 1996 Andrew Tridgell
- * Copyright (C) 1996 Paul Mackerras
- * Copyright (C) 2004-2020 Wayne Davison
- * Copyright (C) 2020 Jorrit Jongma
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, visit the http://fsf.org website.
- */
-/*
- * Optimization target for get_checksum1() was the Intel Atom D2700, the
- * slowest CPU in the test set and the most likely to be CPU limited during
- * transfers. The combination of intrinsics was chosen specifically for the
- * most gain on that CPU, other combinations were occasionally slightly
- * faster on the others.
- *
- * While on more modern CPUs transfers are less likely to be CPU limited,
- * lower CPU usage is always better. Improvements may still be seen when
- * matching chunks from NVMe storage even on newer CPUs.
- *
- * Benchmarks                   C           SSE2        SSSE3
- * - Intel Atom D2700           550 MB/s    750 MB/s    1000 MB/s
- * - Intel i7-7700hq            1850 MB/s   2550 MB/s   4050 MB/s
- * - AMD ThreadRipper 2950x     2900 MB/s   5600 MB/s   8950 MB/s
- *
- * This optimization for get_checksum1() is intentionally limited to x86-64
- * as no 32-bit CPU was available for testing. As 32-bit CPUs only have half
- * the available xmm registers, this optimized version may not be faster than
- * the pure C version anyway. Note that all x86-64 CPUs support SSE2.
- *
- * This file is compiled using GCC 4.8+'s C++ front end to allow the use of
- * the target attribute, selecting the fastest code path based on runtime
- * detection of CPU capabilities.
- */
-
-#ifdef __x86_64__
-#ifdef __cplusplus
-
-#include "rsync.h"
-
-#ifdef ENABLE_SSE2
-
-#include <immintrin.h>
-
-/* Compatibility functions to let our SSSE3 algorithm run on SSE2 */
-
-__attribute__ ((target ("sse2"))) static inline __m128i
sse_load_si128(__m128i_u* buf) {
-    return _mm_loadu_si128(buf);
-}
-
-__attribute__ ((target ("ssse3"))) static inline __m128i
sse_load_si128(__m128i_u* buf) {
-    return _mm_lddqu_si128(buf);  // same as loadu on all but the
oldest SSSE3 CPUs
-}
-
-__attribute__ ((target ("sse2"))) static inline __m128i
sse_interleave_odd_epi16(__m128i a, __m128i b) {
-    return _mm_packs_epi32(
-        _mm_srai_epi32(a, 16),
-        _mm_srai_epi32(b, 16)
-    );
-}
-
-__attribute__ ((target ("sse2"))) static inline __m128i
sse_interleave_even_epi16(__m128i a, __m128i b) {
-    return sse_interleave_odd_epi16(
-        _mm_slli_si128(a, 2),
-        _mm_slli_si128(b, 2)
-    );
-}
-
-__attribute__ ((target ("sse2"))) static inline __m128i
sse_mulu_odd_epi8(__m128i a, __m128i b) {
-    return _mm_mullo_epi16(
-        _mm_srli_epi16(a, 8),
-        _mm_srai_epi16(b, 8)
-    );
-}
-
-__attribute__ ((target ("sse2"))) static inline __m128i
sse_mulu_even_epi8(__m128i a, __m128i b) {
-    return _mm_mullo_epi16(
-        _mm_and_si128(a, _mm_set1_epi16(0xFF)),
-        _mm_srai_epi16(_mm_slli_si128(b, 1), 8)
-    );
-}
-
-__attribute__ ((target ("sse2"))) static inline __m128i
sse_hadds_epi16(__m128i a, __m128i b) {
-    return _mm_adds_epi16(
-        sse_interleave_even_epi16(a, b),
-        sse_interleave_odd_epi16(a, b)
-    );
-}
-
-__attribute__ ((target ("ssse3"))) static inline __m128i
sse_hadds_epi16(__m128i a, __m128i b) {
-    return _mm_hadds_epi16(a, b);
-}
-
-__attribute__ ((target ("sse2"))) static inline __m128i
sse_maddubs_epi16(__m128i a, __m128i b) {
-    return _mm_adds_epi16(
-        sse_mulu_even_epi8(a, b),
-        sse_mulu_odd_epi8(a, b)
-    );
-}
-
-__attribute__ ((target ("ssse3"))) static inline __m128i
sse_maddubs_epi16(__m128i a, __m128i b) {
-    return _mm_maddubs_epi16(a, b);
-}
-
-__attribute__ ((target ("default"))) static inline __m128i
sse_load_si128(__m128i_u* buf) { }
-__attribute__ ((target ("default"))) static inline __m128i
sse_interleave_odd_epi16(__m128i a, __m128i b) { }
-__attribute__ ((target ("default"))) static inline __m128i
sse_interleave_even_epi16(__m128i a, __m128i b) { }
-__attribute__ ((target ("default"))) static inline __m128i
sse_mulu_odd_epi8(__m128i a, __m128i b) { }
-__attribute__ ((target ("default"))) static inline __m128i
sse_mulu_even_epi8(__m128i a, __m128i b) { }
-__attribute__ ((target ("default"))) static inline __m128i
sse_hadds_epi16(__m128i a, __m128i b) { }
-__attribute__ ((target ("default"))) static inline __m128i
sse_maddubs_epi16(__m128i a, __m128i b) { }
-
-/*
-  a simple 32 bit checksum that can be updated from either end
-  (inspired by Mark Adler's Adler-32 checksum)
-  */
-/*
-  Original loop per 4 bytes:
-    s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] + 10*CHAR_OFFSET;
-    s1 += buf[i] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET;
-
-  SSE2/SSSE3 loop per 32 bytes:
-    int16 t1[8];
-    int16 t2[8];
-    for (int j = 0; j < 8; j++) {
-      t1[j] = buf[j*4 + i] + buf[j*4 + i+1] + buf[j*4 + i+2] + buf[j*4 + i+3];
-      t2[j] = 4*buf[j*4 + i] + 3*buf[j*4 + i+1] + 2*buf[j*4 + i+2] + buf[j*4 + i+3];
-    }
-    s2 += 32*s1 +
-          28*t1[0] + 24*t1[1] + 20*t1[2] + 16*t1[3] + 12*t1[4] + 8*t1[5] + 4*t1[6] +
-          t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7] +
-          ((16+32+48+64+80+96) + 8)*CHAR_OFFSET;
-    s1 += t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7] +
-          32*CHAR_OFFSET;
- */
-/*
-  Both sse2 and ssse3 targets must be specified here for the optimizer to
-  fully unroll into two separate functions for each, or it will decide which
-  version of other functions (such as sse_maddubs_epi16) to call every loop
-  iteration instead of properly inlining them, negating any performance gain.
- */
-__attribute__ ((target ("sse2", "ssse3"))) static inline uint32
get_checksum1_accel(char *buf1, int32 len) {
-    int32 i;
-    uint32 s1, s2;
-    schar *buf = (schar *)buf1;
-
-    i = s1 = s2 = 0;
-    if (len > 32) {
-        const char mul_t1_buf[16] = {28, 0, 24, 0, 20, 0, 16, 0, 12, 0, 8, 0, 4, 0, 0, 0};
-        __m128i mul_t1 = sse_load_si128((__m128i_u*)mul_t1_buf);
-        __m128i ss1 = _mm_setzero_si128();
-        __m128i ss2 = _mm_setzero_si128();
-
-        for (i = 0; i < (len-32); i+=32) {
-            // Load ... 2*[int8*16]
-            __m128i in8_1 = sse_load_si128((__m128i_u*)&buf[i]);
-            __m128i in8_2 = sse_load_si128((__m128i_u*)&buf[i + 16]);
-
-            // (1*buf[i] + 1*buf[i+1]), (1*buf[i+2], 1*buf[i+3]), ... 2*[int16*8]
-            // Fastest, even though multiply by 1
-            __m128i mul_one = _mm_set1_epi8(1);
-            __m128i add16_1 = sse_maddubs_epi16(mul_one, in8_1);
-            __m128i add16_2 = sse_maddubs_epi16(mul_one, in8_2);
-
-            // (4*buf[i] + 3*buf[i+1]), (2*buf[i+2], buf[i+3]), ... 2*[int16*8]
-            __m128i mul_const = _mm_set1_epi32(4 + (3 << 8) + (2 << 16) + (1 << 24));
-            __m128i mul_add16_1 = sse_maddubs_epi16(mul_const, in8_1);
-            __m128i mul_add16_2 = sse_maddubs_epi16(mul_const, in8_2);
-
-            // s2 += 32*s1
-            ss2 = _mm_add_epi32(ss2, _mm_slli_epi32(ss1, 5));
-
-            // [sum(t1[0]..t1[6]), X, X, X] [int32*4]; faster than multiple _mm_hadds_epi16
-            // Shifting left, then shifting right again and shuffling (rather than just
-            // shifting right as with mul32 below) to cheaply end up with the correct sign
-            // extension as we go from int16 to int32.
-            __m128i sum_add32 = _mm_add_epi16(add16_1, add16_2);
-            sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 2));
-            sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 4));
-            sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 8));
-            sum_add32 = _mm_srai_epi32(sum_add32, 16);
-            sum_add32 = _mm_shuffle_epi32(sum_add32, 3);
-
-            // [sum(t2[0]..t2[6]), X, X, X] [int32*4]; faster than multiple _mm_hadds_epi16
-            __m128i sum_mul_add32 = _mm_add_epi16(mul_add16_1, mul_add16_2);
-            sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 2));
-            sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 4));
-            sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 8));
-            sum_mul_add32 = _mm_srai_epi32(sum_mul_add32, 16);
-            sum_mul_add32 = _mm_shuffle_epi32(sum_mul_add32, 3);
-
-            // s1 += t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7]
-            ss1 = _mm_add_epi32(ss1, sum_add32);
-
-            // s2 += t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7]
-            ss2 = _mm_add_epi32(ss2, sum_mul_add32);
-
-            // [t1[0], t1[1], ...] [int16*8]
-            // We could've combined this with generating sum_add32 above and save one _mm_add_epi16,
-            // but benchmarking shows that as being slower
-            __m128i add16 = sse_hadds_epi16(add16_1, add16_2);
-
-            // [t1[0], t1[1], ...] -> [t1[0]*28 + t1[1]*24, ...] [int32*4]
-            __m128i mul32 = _mm_madd_epi16(add16, mul_t1);
-
-            // [sum(mul32), X, X, X] [int32*4]; faster than multiple _mm_hadd_epi32
-            mul32 = _mm_add_epi32(mul32, _mm_srli_si128(mul32, 4));
-            mul32 = _mm_add_epi32(mul32, _mm_srli_si128(mul32, 8));
-
-            // s2 += 28*t1[0] + 24*t1[1] + 20*t1[2] + 16*t1[3] + 12*t1[4] + 8*t1[5] + 4*t1[6]
-            ss2 = _mm_add_epi32(ss2, mul32);
-
-#if CHAR_OFFSET != 0
-            // s1 += 32*CHAR_OFFSET
-            __m128i char_offset_multiplier = _mm_set1_epi32(32 * CHAR_OFFSET);
-            ss1 = _mm_add_epi32(ss1, char_offset_multiplier);
-
-            // s2 += 528*CHAR_OFFSET
-            char_offset_multiplier = _mm_set1_epi32(528 * CHAR_OFFSET);
-            ss2 = _mm_add_epi32(ss2, char_offset_multiplier);
-#endif
-        }
-
-        int32 x[4] = {0};
-        _mm_store_si128((__m128i_u*)x, ss1);
-        s1 = x[0];
-        _mm_store_si128((__m128i_u*)x, ss2);
-        s2 = x[0];
-    }
-    for (; i < (len-4); i+=4) {
-        s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] + 10*CHAR_OFFSET;
-        s1 += (buf[i] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET);
-    }
-    for (; i < len; i++) {
-        s1 += (buf[i]+CHAR_OFFSET); s2 += s1;
-    }
-    return (s1 & 0xffff) + (s2 << 16);
-}
-
-/*
-  a simple 32 bit checksum that can be updated from either end
-  (inspired by Mark Adler's Adler-32 checksum)
-  */
-/*
-  Pure copy/paste from get_checksum1 @ checksum.c. We cannot use the target
-  attribute there as that requires cpp.
-  */
-__attribute__ ((target ("default"))) static inline uint32
get_checksum1_accel(char *buf1, int32 len)
-{
- int32 i;
- uint32 s1, s2;
- schar *buf = (schar *)buf1;
-
- s1 = s2 = 0;
- for (i = 0; i < (len-4); i+=4) {
- s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] + 10*CHAR_OFFSET;
- s1 += (buf[i+0] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET);
- }
- for (; i < len; i++) {
- s1 += (buf[i]+CHAR_OFFSET); s2 += s1;
- }
- return (s1 & 0xffff) + (s2 << 16);
-}
-
-extern "C" {
-
-/*
-  C doesn't support the target attribute, so here's another wrapper
-*/
-uint32 get_checksum1(char *buf1, int32 len) {
-    return get_checksum1_accel(buf1, len);
-}
-
-}
-#endif /* ENABLE_SSE2 */
-#endif /* __cplusplus */
-#endif /* __x86_64__ */
diff --git a/configure.ac b/configure.ac
index 76c08dc5..f5e155b2 100644
--- a/configure.ac
+++ b/configure.ac
@@ -165,24 +165,33 @@ fi
 AC_DEFINE_UNQUOTED(NOBODY_USER, "nobody", [unprivileged user--e.g. nobody])
 AC_DEFINE_UNQUOTED(NOBODY_GROUP, "$NOBODY_GROUP", [unprivileged group for unprivileged user])

-# SSE2+ optimizations on x86-64 require g++ support
-AC_MSG_CHECKING([whether to enable SSE2+ optimizations])
-AC_ARG_ENABLE(sse2,
- AS_HELP_STRING([--disable-sse2],[disable SSE2+ optimizations (req. g++ and x86-64)]))
+# SIMD optimizations
+SIMD=

-if test x"$enable_sse2" = x"yes" && test x"$build_cpu" = x"x86_64" &&
test x"$CXX" = x"g++"; then
-    AC_MSG_RESULT([yes])
- AC_DEFINE(ENABLE_SSE2, 1, [Define to 1 to enable SSE2+ optimizations (requires g++ and x86-64)])
- CXXOBJ="$CXXOBJ checksum_sse2.o"
-else
+AC_MSG_CHECKING([whether to enable SIMD optimizations])
+AC_ARG_ENABLE(simd,
+    AS_HELP_STRING([--enable-simd],[enable SIMD optimizations]))
+
+if test x"$enable_simd" = x"yes"; then
+    # For x86-64 SIMD, g++ is also required
+    if test x"$build_cpu" = x"x86_64" && test x"$CXX" = x"g++"; then
+        SIMD=x86-64
+    fi
+fi
+
+if test x"$SIMD" = x""; then
     AC_MSG_RESULT(no)
+else
+    AC_MSG_RESULT([yes ($SIMD)])
+ AC_DEFINE(HAVE_SIMD, 1, [Define to 1 to enable SIMD optimizations])
 fi

+AC_SUBST(SIMD)
+
 # We only use g++ for its target attribute dispatching, disable unneeded bulky features
 if test x"$CXXOBJ" != x""; then
     CXXFLAGS="$CXXFLAGS -fno-exceptions -fno-rtti"
 fi
-AC_SUBST(CXXOBJ)

 # arrgh. libc in some old debian version screwed up the largefile
 # stuff, getting byte range locking wrong
diff --git a/options.c b/options.c
index ca3b97e1..742ff532 100644
--- a/options.c
+++ b/options.c
@@ -578,7 +578,7 @@ static void print_rsync_version(enum logcode f)
  char const *links = "no ";
  char const *iconv = "no ";
  char const *ipv6 = "no ";
- char const *sse2 = "no ";
+ char const *simd = "no ";
  STRUCT_STAT *dumstat;

 #if SUBPROTOCOL_VERSION != 0
@@ -615,8 +615,8 @@ static void print_rsync_version(enum logcode f)
 #ifdef CAN_SET_SYMLINK_TIMES
  symtimes = "";
 #endif
-#ifdef ENABLE_SSE2
- sse2 = "";
+#ifdef HAVE_SIMD
+ simd = "";
 #endif

  rprintf(f, "%s  version %s  protocol version %d%s\n",
@@ -631,8 +631,8 @@ static void print_rsync_version(enum logcode f)
  (int)(sizeof (int64) * 8));
  rprintf(f, "    %ssocketpairs, %shardlinks, %ssymlinks, %sIPv6,
batchfiles, %sinplace,\n",
  got_socketpair, hardlinks, links, ipv6, have_inplace);
- rprintf(f, "    %sappend, %sACLs, %sxattrs, %siconv, %ssymtimes,
%sprealloc, %ssse2\n",
- have_inplace, acls, xattrs, iconv, symtimes, prealloc, sse2);
+ rprintf(f, "    %sappend, %sACLs, %sxattrs, %siconv, %ssymtimes,
%sprealloc, %ssimd\n",
+ have_inplace, acls, xattrs, iconv, symtimes, prealloc, simd);

 #ifdef MAINTAINER_MODE
  rprintf(f, "Panic Action: \"%s\"\n", get_panic_action());


