[SCM] The rsync repository. - branch master updated
Rsync CVS commit messages
rsync-cvs at lists.samba.org
Wed Sep 14 14:17:57 UTC 2022
The branch, master has been updated
via 71c2b5d0 Fix exclusion of /. with --relative.
via f3f5d842 Tweak a define.
via 8b1b81e0 Use UNSUPPORTED instead of PROTOCOL for various validation checks.
via e8161304 Use hashlittle2() for xattr hashing
via b012cde1 Add hashlittle2() and ensure the hash is never 0
via 464555ea Fix really silly bug with --relative rules.
via df904f59 Improve var ref.
via 208d6ad1 NEWS tweak.
from 51dae12c Update NEWS.
https://git.samba.org/?p=rsync.git;a=shortlog;h=master
- Log -----------------------------------------------------------------
commit 71c2b5d0e386b845fb2f4d427568451f098008ff
Author: Wayne Davison <wayne at opencoder.net>
Date: Wed Sep 14 07:14:13 2022 -0700
Fix exclusion of /. with --relative.
commit f3f5d8420f97a6c0b27419cf1db24b93945b7395
Author: Wayne Davison <wayne at opencoder.net>
Date: Wed Sep 14 07:13:24 2022 -0700
Tweak a define.
commit 8b1b81e054bdcc927ff26f20f424e034bd273175
Author: Wayne Davison <wayne at opencoder.net>
Date: Tue Sep 13 23:38:01 2022 -0700
Use UNSUPPORTED instead of PROTOCOL for various validation checks.
commit e8161304f74a786101a708c60c39ff2a94b78764
Author: Wayne Davison <wayne at opencoder.net>
Date: Tue Sep 13 22:43:01 2022 -0700
Use hashlittle2() for xattr hashing
- The non-zero key code is now in hashtable.c
- The hashtable_create() code already checks for OOM
commit b012cde1ed34805ba6996933bee437a00d253c0c
Author: Wayne Davison <wayne at opencoder.net>
Date: Tue Sep 13 22:37:39 2022 -0700
Add hashlittle2() and ensure the hash is never 0
It's probably time for a faster hash algorithm, but this gives us
the free 64-bit hashing that things like the xattr code can use.
commit 464555ea923b32f3504678d05bc7de9205e5c8da
Author: Wayne Davison <wayne at opencoder.net>
Date: Tue Sep 13 20:56:32 2022 -0700
Fix really silly bug with --relative rules.
commit df904f590ecbcb7fe99a834beba961276b1616ce
Author: Wayne Davison <wayne at opencoder.net>
Date: Tue Sep 13 20:55:26 2022 -0700
Improve var ref.
commit 208d6ad1cd6f315959a2b2f1b7f032144716ec49
Author: Wayne Davison <wayne at opencoder.net>
Date: Tue Sep 13 20:54:35 2022 -0700
NEWS tweak.
-----------------------------------------------------------------------
Summary of changes:
NEWS.md | 2 +-
exclude.c | 4 +-
flist.c | 10 ++--
hashtable.c | 172 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
xattrs.c | 16 ++----
5 files changed, 180 insertions(+), 24 deletions(-)
Changeset truncated at 500 lines:
diff --git a/NEWS.md b/NEWS.md
index 44eddd9b..49dbc544 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -12,7 +12,7 @@
- Added negotiated daemon-auth support that allows a stronger checksum digest
to be used. Added SHA512, SHA256, and SHA1 digests to MD5 & MD4. These new
- digests are at the highest priority in the new negotiation list.
+ digests are at the highest priority in the new daemon-auth negotiation list.
- Added support for SHA1, SHA256, and SHA512 digests in file checksums. While
this tends to be overkill, it is available if someone really needs it. These
diff --git a/exclude.c b/exclude.c
index 4022e824..b21207ba 100644
--- a/exclude.c
+++ b/exclude.c
@@ -494,9 +494,9 @@ void add_implied_include(const char *arg, int skip_daemon_module)
maybe_add_literal_brackets_rule(rule, arg_len);
if (relative_paths && slash_cnt) {
filter_rule const *ent;
- int found = 0;
slash_cnt = 1;
for (p = new_pat + 1; (p = strchr(p, '/')) != NULL; p++) {
+ int found = 0;
*p = '\0';
for (ent = implied_filter_list.head; ent; ent = ent->next) {
if (ent != rule && strcmp(ent->pattern, new_pat) == 0) {
@@ -508,7 +508,7 @@ void add_implied_include(const char *arg, int skip_daemon_module)
filter_rule *R_rule = new0(filter_rule);
R_rule->rflags = FILTRULE_INCLUDE | FILTRULE_DIRECTORY;
/* Check if our sub-path has wildcards or escaped backslashes */
- if (saw_wild && strpbrk(rule->pattern, "*[?\\"))
+ if (saw_wild && strpbrk(new_pat, "*[?\\"))
R_rule->rflags |= FILTRULE_WILD;
R_rule->pattern = strdup(new_pat);
R_rule->u.slash_cnt = slash_cnt;
diff --git a/flist.c b/flist.c
index db11b353..82d686a6 100644
--- a/flist.c
+++ b/flist.c
@@ -756,7 +756,7 @@ static struct file_struct *recv_file_entry(int f, struct file_list *flist, int x
if (*thisname
&& (clean_fname(thisname, CFN_REFUSE_DOT_DOT_DIRS) < 0 || (!relative_paths && *thisname == '/'))) {
rprintf(FERROR, "ABORTING due to unsafe pathname from sender: %s\n", thisname);
- exit_cleanup(RERR_PROTOCOL);
+ exit_cleanup(RERR_UNSUPPORTED);
}
if (sanitize_paths)
@@ -988,16 +988,16 @@ static struct file_struct *recv_file_entry(int f, struct file_list *flist, int x
exit_cleanup(RERR_UNSUPPORTED);
}
- if (*thisname != '.' || thisname[1] != '\0') {
+ if (*thisname == '/' ? thisname[1] != '.' || thisname[2] != '\0' : *thisname != '.' || thisname[1] != '\0') {
int filt_flags = S_ISDIR(mode) ? NAME_IS_DIR : NAME_IS_FILE;
if (!trust_sender_filter /* a per-dir filter rule means we must trust the sender's filtering */
&& filter_list.head && check_server_filter(&filter_list, FINFO, thisname, filt_flags) < 0) {
rprintf(FERROR, "ERROR: rejecting excluded file-list name: %s\n", thisname);
- exit_cleanup(RERR_PROTOCOL);
+ exit_cleanup(RERR_UNSUPPORTED);
}
if (implied_filter_list.head && check_filter(&implied_filter_list, FINFO, thisname, filt_flags) <= 0) {
rprintf(FERROR, "ERROR: rejecting unrequested file-list name: %s\n", thisname);
- exit_cleanup(RERR_PROTOCOL);
+ exit_cleanup(RERR_UNSUPPORTED);
}
}
@@ -2642,7 +2642,7 @@ struct file_list *recv_file_list(int f, int dir_ndx)
rprintf(FERROR,
"ABORTING due to invalid path from sender: %s/%s\n",
cur_dir, file->basename);
- exit_cleanup(RERR_PROTOCOL);
+ exit_cleanup(RERR_UNSUPPORTED);
}
good_dirname = cur_dir;
}
diff --git a/hashtable.c b/hashtable.c
index e272f439..f7a8a6c9 100644
--- a/hashtable.c
+++ b/hashtable.c
@@ -350,6 +350,9 @@ void *hashtable_find(struct hashtable *tbl, int64 key, void *data_when_new)
-------------------------------------------------------------------------------
*/
+#define NON_ZERO_32(x) ((x) ? (x) : (uint32_t)1)
+#define NON_ZERO_64(x, y) ((x) || (y) ? (int64)(x) << 32 | (y) : (int64)1)
+
uint32_t hashlittle(const void *key, size_t length)
{
uint32_t a,b,c; /* internal state */
@@ -390,7 +393,7 @@ uint32_t hashlittle(const void *key, size_t length)
case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
case 1 : a+=k8[0]; break;
- case 0 : return c;
+ case 0 : return NON_ZERO_32(c);
}
} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
@@ -436,7 +439,7 @@ uint32_t hashlittle(const void *key, size_t length)
break;
case 1 : a+=k8[0];
break;
- case 0 : return c; /* zero length requires no mixing */
+ case 0 : return NON_ZERO_32(c); /* zero length requires no mixing */
}
} else { /* need to read the key one byte at a time */
@@ -489,10 +492,171 @@ uint32_t hashlittle(const void *key, size_t length)
/* FALLTHROUGH */
case 1 : a+=k[0];
break;
- case 0 : return c;
+ case 0 : return NON_ZERO_32(c);
}
}
final(a,b,c);
- return c;
+ return NON_ZERO_32(c);
}
+
+#if SIZEOF_INT64 >= 8
+/*
+ * hashlittle2: return 2 32-bit hash values joined into an int64.
+ *
+ * This is identical to hashlittle(), except it returns two 32-bit hash
+ * values instead of just one. This is good enough for hash table
+ * lookup with 2^^64 buckets, or if you want a second hash if you're not
+ * happy with the first, or if you want a probably-unique 64-bit ID for
+ * the key. *pc is better mixed than *pb, so use *pc first. If you want
+ * a 64-bit value do something like "*pc + (((uint64_t)*pb)<<32)".
+ */
+int64 hashlittle2(const void *key, size_t length)
+{
+ uint32_t a,b,c; /* internal state */
+ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length);
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+ const uint8_t *k8;
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
+ case 1 : a+=k8[0]; break;
+ case 0 : return NON_ZERO_64(b, c);
+ }
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t)k[1])<<16);
+ b += k[2] + (((uint32_t)k[3])<<16);
+ c += k[4] + (((uint32_t)k[5])<<16);
+ mix(a,b,c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t)k[5])<<16);
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : return NON_ZERO_64(b, c); /* zero length strings require no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ a += ((uint32_t)k[1])<<8;
+ a += ((uint32_t)k[2])<<16;
+ a += ((uint32_t)k[3])<<24;
+ b += k[4];
+ b += ((uint32_t)k[5])<<8;
+ b += ((uint32_t)k[6])<<16;
+ b += ((uint32_t)k[7])<<24;
+ c += k[8];
+ c += ((uint32_t)k[9])<<8;
+ c += ((uint32_t)k[10])<<16;
+ c += ((uint32_t)k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=((uint32_t)k[11])<<24;
+ /* FALLTHROUGH */
+ case 11: c+=((uint32_t)k[10])<<16;
+ /* FALLTHROUGH */
+ case 10: c+=((uint32_t)k[9])<<8;
+ /* FALLTHROUGH */
+ case 9 : c+=k[8];
+ /* FALLTHROUGH */
+ case 8 : b+=((uint32_t)k[7])<<24;
+ /* FALLTHROUGH */
+ case 7 : b+=((uint32_t)k[6])<<16;
+ /* FALLTHROUGH */
+ case 6 : b+=((uint32_t)k[5])<<8;
+ /* FALLTHROUGH */
+ case 5 : b+=k[4];
+ /* FALLTHROUGH */
+ case 4 : a+=((uint32_t)k[3])<<24;
+ /* FALLTHROUGH */
+ case 3 : a+=((uint32_t)k[2])<<16;
+ /* FALLTHROUGH */
+ case 2 : a+=((uint32_t)k[1])<<8;
+ /* FALLTHROUGH */
+ case 1 : a+=k[0];
+ break;
+ case 0 : return NON_ZERO_64(b, c);
+ }
+ }
+
+ final(a,b,c);
+ return NON_ZERO_64(b, c);
+}
+#else
+#define hashlittle2(key, len) hashlittle(key, len)
+#endif
diff --git a/xattrs.c b/xattrs.c
index b9e79a1e..26e50a6f 100644
--- a/xattrs.c
+++ b/xattrs.c
@@ -381,20 +381,14 @@ static int64 xattr_lookup_hash(const item_list *xalp)
{
const rsync_xa *rxas = xalp->items;
size_t i;
- int64 key = hashlittle(&xalp->count, sizeof xalp->count);
+ int64 key = hashlittle2(&xalp->count, sizeof xalp->count);
for (i = 0; i < xalp->count; i++) {
- key += hashlittle(rxas[i].name, rxas[i].name_len);
+ key += hashlittle2(rxas[i].name, rxas[i].name_len);
if (rxas[i].datum_len > MAX_FULL_DATUM)
- key += hashlittle(rxas[i].datum, xattr_sum_len);
+ key += hashlittle2(rxas[i].datum, xattr_sum_len);
else
- key += hashlittle(rxas[i].datum, rxas[i].datum_len);
- }
-
- if (key == 0) {
- /* This is very unlikely, but we should never
- * return 0 as hashtable_find() doesn't like it. */
- return 1;
+ key += hashlittle2(rxas[i].datum, rxas[i].datum_len);
}
return key;
@@ -475,8 +469,6 @@ static int rsync_xal_store(item_list *xalp)
if (rsync_xal_h == NULL)
rsync_xal_h = hashtable_create(512, HT_KEY64);
- if (rsync_xal_h == NULL)
- out_of_memory("rsync_xal_h hashtable_create()");
new_ref = new0(rsync_xa_list_ref);
new_ref->ndx = ndx;
--
The rsync repository.
More information about the rsync-cvs
mailing list