[SCM] Samba Shared Repository - branch master updated
Volker Lendecke
vlendec at samba.org
Wed Jul 8 11:03:03 UTC 2020
The branch, master has been updated
via 4faab2a77a6 s3:dbwrap_watch: avoid recursion into dbwrap_do_locked() from dbwrap_watched_do_locked_{storev,delete}()
via a618776ac4e s3:locking: convert share_mode_lock.c to generate_unique_u64()
via 6c68c75b210 s3:g_lock: avoid very expensive generate_random_buffer() in g_lock_parse()
via c25fb103ea8 lib/util: add generate_unique_u64() helper function
from cd5a2d015bf s3:smbcacls: Add support for DFS path
https://git.samba.org/?p=samba.git;a=shortlog;h=master
- Log -----------------------------------------------------------------
commit 4faab2a77a66497ea18ae6df8fee28e27dab1b4a
Author: Stefan Metzmacher <metze at samba.org>
Date: Thu May 14 13:32:47 2020 +0200
s3:dbwrap_watch: avoid recursion into dbwrap_do_locked() from dbwrap_watched_do_locked_{storev,delete}()
This avoids a lot of overhead!
Using smbtorture3 //foo/bar -U% local-g-lock-ping-pong -o 500000
under valgrind --tool=callgrind...
This change replaces this:
6,877,542,529 PROGRAM TOTALS
590,000,773 lib/tdb/common/lock.c:tdb_lock_list
479,000,608 lib/tdb/common/lock.c:tdb_unlock
446,500,532 lib/tdb/common/io.c:tdb_read
364,000,824 lib/tdb/common/hash.c:tdb_jenkins_hash
285,000,532 lib/tdb/common/io.c:tdb_write
262,054,669 /x86_64/multiarch/memmove-vec-unaligned-erms.S:__memcpy_avx_unaligned_erms
206,500,496 lib/tdb/common/mutex.c:tdb_mutex_lock
193,000,176 lib/tdb/common/tdb.c:tdb_find
160,000,256 lib/talloc/talloc.c:_talloc_get_type_abort
148,500,297 lib/tdb/common/tdb.c:tdb_storev
140,000,196 lib/tdb/common/lock.c:tdb_lock
130,000,858 lib/util/debug.c:debuglevel_get_class
128,003,722 lib/talloc/talloc.c:_talloc_free
128,000,118 lib/tdb/common/tdb.c:tdb_parse_record
126,000,576 lib/tdb/common/lock.c:tdb_brlock.part.3
121,000,272 lib/tdb/common/mutex.c:tdb_mutex_unlock
118,000,225 /nptl/pthread_mutex_lock.c:__pthread_mutex_lock_full
112,750,222 lib/tdb/common/freelist.c:tdb_allocate_from_freelist
108,500,168 lib/tdb/common/io.c:tdb_ofs_read
102,500,000 lib/tdb/common/io.c:tdb_parse_data
by this:
5,706,522,398 PROGRAM TOTALS
434,000,617 lib/tdb/common/lock.c:tdb_lock_list
389,500,494 lib/tdb/common/io.c:tdb_read
359,000,488 lib/tdb/common/lock.c:tdb_unlock
285,000,532 lib/tdb/common/io.c:tdb_write
237,554,655 /x86_64/multiarch/memmove-vec-unaligned-erms.S:__memcpy_avx_unaligned_erms
208,000,668 lib/tdb/common/hash.c:tdb_jenkins_hash
206,500,496 lib/tdb/common/mutex.c:tdb_mutex_lock
160,000,256 lib/talloc/talloc.c:_talloc_get_type_abort
148,500,297 lib/tdb/common/tdb.c:tdb_storev
136,000,132 lib/tdb/common/tdb.c:tdb_find
130,000,858 lib/util/debug.c:debuglevel_get_class
126,000,576 lib/tdb/common/lock.c:tdb_brlock.part.3
121,000,272 lib/tdb/common/mutex.c:tdb_mutex_unlock
118,000,225 /nptl/pthread_mutex_lock.c:__pthread_mutex_lock_full
112,750,222 lib/tdb/common/freelist.c:tdb_allocate_from_freelist
112,000,168 lib/tdb/common/lock.c:tdb_lock
94,500,154 lib/tdb/common/io.c:tdb_ofs_read
94,000,188 /nptl/pthread_mutex_unlock.c:__pthread_mutex_unlock_full
86,000,086 lib/dbwrap/dbwrap.c:dbwrap_lock_order_lock
83,000,083 lib/dbwrap/dbwrap_tdb.c:db_tdb_do_locked
time smbtorture3 //foo/bar -U% local-g-lock-ping-pong -o 5000000
gives:
902834 locks/sec
real 0m11,103s
user 0m8,233s
sys 0m2,868s
vs.
1037262 locks/sec
real 0m9,685s
user 0m6,788s
sys 0m2,896s
Signed-off-by: Stefan Metzmacher <metze at samba.org>
Reviewed-by: Volker Lendecke <vl at samba.org>
Autobuild-User(master): Volker Lendecke <vl at samba.org>
Autobuild-Date(master): Wed Jul 8 11:02:39 UTC 2020 on sn-devel-184
commit a618776ac4e097e27e3c766d996323b1d86a7241
Author: Stefan Metzmacher <metze at samba.org>
Date: Tue Jul 7 11:49:27 2020 +0200
s3:locking: convert share_mode_lock.c to generate_unique_u64()
Instead of a sequence number that gets incremented we just
need a value that's not reused.
This is a similar change to the previous commit at the g_lock.c
layer.
I expect a similar performance improvement here, but
I don't know a specific benchmark test to check.
Signed-off-by: Stefan Metzmacher <metze at samba.org>
Reviewed-by: Volker Lendecke <vl at samba.org>
commit 6c68c75b2100ba95da9575350b3a9d14211a5cc7
Author: Stefan Metzmacher <metze at samba.org>
Date: Tue May 19 02:58:23 2020 +0200
s3:g_lock: avoid very expensive generate_random_buffer() in g_lock_parse()
We don't require a sequence number that is incremented,
we just need a value that's not reused.
We use the new generate_unique_u64(), which is much cheaper!
Using smbtorture3 //foo/bar -U% local-g-lock-ping-pong -o 500000
under valgrind --tool=callgrind...
This change replaces this:
13,129,925,659 PROGRAM TOTALS
4,125,752,958 ???:_nettle_sha256_compress [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
1,257,005,866 ???:_nettle_aes_encrypt [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
590,000,773 bin/default/../../lib/tdb/common/lock.c:tdb_lock_list
571,503,429 ???:_nettle_aes_set_key [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
479,000,608 bin/default/../../lib/tdb/common/lock.c:tdb_unlock
...
by this:
6,877,826,377 PROGRAM TOTALS
590,000,773 bin/default/../../lib/tdb/common/lock.c:tdb_lock_list
479,000,608 bin/default/../../lib/tdb/common/lock.c:tdb_unlock
...
12,500,033 bin/default/../../lib/util/genrand_util.c:generate_unique_u64
...
8,996,970 ???:_nettle_sha256_compress [/usr/lib/x86_64-linux-gnu/libnettle.so.6.4]
time smbtorture3 //foo/bar -U% local-g-lock-ping-pong -o 5000000
gives:
537426 locks/sec
real 0m19,071s
user 0m15,061s
sys 0m3,999s
vs.
900956 locks/sec
real 0m11,155s
user 0m8,293s
sys 0m2,860s
Signed-off-by: Stefan Metzmacher <metze at samba.org>
Reviewed-by: Volker Lendecke <vl at samba.org>
commit c25fb103ea8b1a822005afbc791610329fd0295a
Author: Stefan Metzmacher <metze at samba.org>
Date: Tue Jun 9 16:19:50 2020 +0200
lib/util: add generate_unique_u64() helper function
Signed-off-by: Stefan Metzmacher <metze at samba.org>
Reviewed-by: Volker Lendecke <vl at samba.org>
-----------------------------------------------------------------------
Summary of changes:
lib/util/genrand_util.c | 23 +++++++++++++++
lib/util/samba_util.h | 29 ++++++++++++++++++-
source3/lib/dbwrap/dbwrap_watch.c | 60 ++++++++++++++++++++++++++++++++++++---
source3/lib/g_lock.c | 38 ++++++++++++-------------
source3/librpc/idl/open_files.idl | 2 +-
source3/locking/share_mode_lock.c | 29 +++++++++----------
6 files changed, 141 insertions(+), 40 deletions(-)
Changeset truncated at 500 lines:
diff --git a/lib/util/genrand_util.c b/lib/util/genrand_util.c
index 05d1f3ef6e5..26b52a1c814 100644
--- a/lib/util/genrand_util.c
+++ b/lib/util/genrand_util.c
@@ -47,7 +47,30 @@ _PUBLIC_ uint64_t generate_random_u64(void)
return BVAL(v, 0);
}
+static struct generate_unique_u64_state {
+ uint64_t next_value;
+ int pid;
+} generate_unique_u64_state;
+_PUBLIC_ uint64_t generate_unique_u64(uint64_t veto_value)
+{
+ int pid = getpid();
+
+ if (unlikely(pid != generate_unique_u64_state.pid)) {
+ generate_unique_u64_state = (struct generate_unique_u64_state) {
+ .pid = pid,
+ .next_value = veto_value,
+ };
+ }
+
+ while (unlikely(generate_unique_u64_state.next_value == veto_value)) {
+ generate_nonce_buffer(
+ (void *)&generate_unique_u64_state.next_value,
+ sizeof(generate_unique_u64_state.next_value));
+ }
+
+ return generate_unique_u64_state.next_value++;
+}
/**
Microsoft composed the following rules (among others) for quality
diff --git a/lib/util/samba_util.h b/lib/util/samba_util.h
index f0aa42e7271..5a81baa80b6 100644
--- a/lib/util/samba_util.h
+++ b/lib/util/samba_util.h
@@ -94,10 +94,37 @@ _PUBLIC_ int sys_getnameinfo(const struct sockaddr *psa,
_PUBLIC_ uint32_t generate_random(void);
/**
- generate a single random uint64_t
+ * generate a single random uint64_t
+ * @see generate_unique_u64
**/
_PUBLIC_ uint64_t generate_random_u64(void);
+/**
+ * @brief Generate random nonces usable for re-use detection.
+ *
+ * We have a lot of places which require a unique id that can
+ * be used as a unique identifier for caching states.
+ *
+ * Always using generate_nonce_buffer() has its performance costs,
+ * it's typically much better than generate_random_buffer(), but
+ * still it's overhead we want to avoid in performance critical
+ * workloads.
+ *
+ * We call generate_nonce_buffer() just once per given state
+ * and process.
+ *
+ * This is much lighter than generate_random_u64() and it's
+ * designed for performance critical code paths.
+ *
+ * @veto_value It is guaranteed that the return value is different from
+ * the veto_value.
+ *
+ * @return a unique value per given state and process
+ *
+ * @see generate_random_u64
+ */
+uint64_t generate_unique_u64(uint64_t veto_value);
+
/**
very basic password quality checker
**/
diff --git a/source3/lib/dbwrap/dbwrap_watch.c b/source3/lib/dbwrap/dbwrap_watch.c
index 206eabc8d5d..c442bf2e8f6 100644
--- a/source3/lib/dbwrap/dbwrap_watch.c
+++ b/source3/lib/dbwrap/dbwrap_watch.c
@@ -287,6 +287,14 @@ static int db_watched_subrec_destructor(struct db_watched_subrec *s)
return 0;
}
+struct dbwrap_watched_subrec_wakeup_state {
+ struct messaging_context *msg_ctx;
+};
+static void dbwrap_watched_subrec_wakeup_fn(
+ struct db_record *rec,
+ TDB_DATA value,
+ void *private_data);
+
struct dbwrap_watched_do_locked_state {
struct db_context *db;
void (*fn)(struct db_record *rec,
@@ -296,6 +304,20 @@ struct dbwrap_watched_do_locked_state {
struct db_watched_subrec subrec;
+ /*
+ * This contains the initial value we got
+ * passed to dbwrap_watched_do_locked_fn()
+ *
+ * It's only used in order to pass it
+ * to dbwrap_watched_subrec_wakeup_fn()
+ * in dbwrap_watched_do_locked_{storev,delete}()
+ *
+ * It gets cleared after the first call to
+ * dbwrap_watched_subrec_wakeup_fn() as we
+ * only need to wakeup once per dbwrap_do_locked().
+ */
+ TDB_DATA wakeup_value;
+
NTSTATUS status;
};
@@ -305,8 +327,20 @@ static NTSTATUS dbwrap_watched_do_locked_storev(
{
struct dbwrap_watched_do_locked_state *state = rec->private_data;
struct db_watched_subrec *subrec = &state->subrec;
+ struct db_watched_ctx *ctx = talloc_get_type_abort(
+ state->db->private_data, struct db_watched_ctx);
+ struct dbwrap_watched_subrec_wakeup_state wakeup_state = {
+ .msg_ctx = ctx->msg,
+ };
NTSTATUS status;
+ /*
+ * Wakeup only needs to happen once.
+ * so we clear state->wakeup_value after the first run
+ */
+ dbwrap_watched_subrec_wakeup_fn(rec, state->wakeup_value, &wakeup_state);
+ state->wakeup_value = (TDB_DATA) { .dsize = 0, };
+
status = dbwrap_watched_subrec_storev(rec, subrec, dbufs, num_dbufs,
flags);
return status;
@@ -316,8 +350,20 @@ static NTSTATUS dbwrap_watched_do_locked_delete(struct db_record *rec)
{
struct dbwrap_watched_do_locked_state *state = rec->private_data;
struct db_watched_subrec *subrec = &state->subrec;
+ struct db_watched_ctx *ctx = talloc_get_type_abort(
+ state->db->private_data, struct db_watched_ctx);
+ struct dbwrap_watched_subrec_wakeup_state wakeup_state = {
+ .msg_ctx = ctx->msg,
+ };
NTSTATUS status;
+ /*
+ * Wakeup only needs to happen once.
+ * so we clear state->wakeup_value after the first run
+ */
+ dbwrap_watched_subrec_wakeup_fn(rec, state->wakeup_value, &wakeup_state);
+ state->wakeup_value = (TDB_DATA) { .dsize = 0, };
+
status = dbwrap_watched_subrec_delete(rec, subrec);
return status;
}
@@ -343,6 +389,7 @@ static void dbwrap_watched_do_locked_fn(
state->subrec = (struct db_watched_subrec) {
.subrec = subrec
};
+ state->wakeup_value = subrec_value;
ok = dbwrap_watch_rec_parse(subrec_value, NULL, NULL, &value);
if (!ok) {
@@ -382,10 +429,6 @@ static NTSTATUS dbwrap_watched_do_locked(struct db_context *db, TDB_DATA key,
return state.status;
}
-struct dbwrap_watched_subrec_wakeup_state {
- struct messaging_context *msg_ctx;
-};
-
static void dbwrap_watched_subrec_wakeup_fn(
struct db_record *rec,
TDB_DATA value,
@@ -451,6 +494,15 @@ static void dbwrap_watched_subrec_wakeup(
};
NTSTATUS status;
+ if (rec->storev == dbwrap_watched_do_locked_storev) {
+ /*
+ * This is handled in the caller,
+ * as we need to avoid recursion
+ * into dbwrap_do_locked().
+ */
+ return;
+ }
+
status = dbwrap_do_locked(
backend,
subrec->subrec->key,
diff --git a/source3/lib/g_lock.c b/source3/lib/g_lock.c
index d9843359dd1..c36539393e1 100644
--- a/source3/lib/g_lock.c
+++ b/source3/lib/g_lock.c
@@ -43,7 +43,7 @@ struct g_lock {
struct server_id exclusive;
size_t num_shared;
uint8_t *shared;
- uint64_t data_seqnum;
+ uint64_t unique_data_epoch;
size_t datalen;
uint8_t *data;
};
@@ -52,15 +52,15 @@ static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
{
struct server_id exclusive;
size_t num_shared, shared_len;
- uint64_t data_seqnum;
+ uint64_t unique_data_epoch;
if (buflen < (SERVER_ID_BUF_LENGTH + /* exclusive */
sizeof(uint64_t) + /* seqnum */
sizeof(uint32_t))) { /* num_shared */
- struct g_lock ret = { .exclusive.pid = 0 };
- generate_random_buffer(
- (uint8_t *)&ret.data_seqnum,
- sizeof(ret.data_seqnum));
+ struct g_lock ret = {
+ .exclusive.pid = 0,
+ .unique_data_epoch = generate_unique_u64(0),
+ };
*lck = ret;
return true;
}
@@ -69,7 +69,7 @@ static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
buf += SERVER_ID_BUF_LENGTH;
buflen -= SERVER_ID_BUF_LENGTH;
- data_seqnum = BVAL(buf, 0);
+ unique_data_epoch = BVAL(buf, 0);
buf += sizeof(uint64_t);
buflen -= sizeof(uint64_t);
@@ -90,7 +90,7 @@ static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
.exclusive = exclusive,
.num_shared = num_shared,
.shared = buf,
- .data_seqnum = data_seqnum,
+ .unique_data_epoch = unique_data_epoch,
.datalen = buflen-shared_len,
.data = buf+shared_len,
};
@@ -160,7 +160,7 @@ static NTSTATUS g_lock_store(
}
server_id_put(exclusive, lck->exclusive);
- SBVAL(seqnum_buf, 0, lck->data_seqnum);
+ SBVAL(seqnum_buf, 0, lck->unique_data_epoch);
if (new_shared != NULL) {
if (lck->num_shared >= UINT32_MAX) {
@@ -967,7 +967,7 @@ static void g_lock_writev_data_fn(
return;
}
- lck.data_seqnum += 1;
+ lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
lck.data = NULL;
lck.datalen = 0;
state->status = g_lock_store(
@@ -1207,7 +1207,7 @@ struct g_lock_watch_data_state {
TDB_DATA key;
struct server_id blocker;
bool blockerdead;
- uint64_t data_seqnum;
+ uint64_t unique_data_epoch;
NTSTATUS status;
};
@@ -1231,9 +1231,9 @@ static void g_lock_watch_data_send_fn(
state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
return;
}
- state->data_seqnum = lck.data_seqnum;
+ state->unique_data_epoch = lck.unique_data_epoch;
- DBG_DEBUG("state->data_seqnum=%"PRIu64"\n", state->data_seqnum);
+ DBG_DEBUG("state->unique_data_epoch=%"PRIu64"\n", state->unique_data_epoch);
subreq = dbwrap_watched_watch_send(
state, state->ev, rec, state->blocker);
@@ -1305,11 +1305,11 @@ static void g_lock_watch_data_done_fn(
return;
}
- if (lck.data_seqnum != state->data_seqnum) {
- DBG_DEBUG("lck.data_seqnum=%"PRIu64", "
- "state->data_seqnum=%"PRIu64"\n",
- lck.data_seqnum,
- state->data_seqnum);
+ if (lck.unique_data_epoch != state->unique_data_epoch) {
+ DBG_DEBUG("lck.unique_data_epoch=%"PRIu64", "
+ "state->unique_data_epoch=%"PRIu64"\n",
+ lck.unique_data_epoch,
+ state->unique_data_epoch);
state->status = NT_STATUS_OK;
return;
}
@@ -1394,7 +1394,7 @@ static void g_lock_wake_watchers_fn(
return;
}
- lck.data_seqnum += 1;
+ lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
status = g_lock_store(rec, &lck, NULL, NULL, 0);
if (!NT_STATUS_IS_OK(status)) {
diff --git a/source3/librpc/idl/open_files.idl b/source3/librpc/idl/open_files.idl
index 432c1e71bc0..891ff1f763d 100644
--- a/source3/librpc/idl/open_files.idl
+++ b/source3/librpc/idl/open_files.idl
@@ -53,7 +53,7 @@ interface open_files
} share_mode_flags;
typedef [public] struct {
- hyper sequence_number;
+ hyper unique_content_epoch;
share_mode_flags flags;
[string,charset(UTF8)] char *servicepath;
[string,charset(UTF8)] char *base_name;
diff --git a/source3/locking/share_mode_lock.c b/source3/locking/share_mode_lock.c
index a93141304b2..ba0bc2b1e7b 100644
--- a/source3/locking/share_mode_lock.c
+++ b/source3/locking/share_mode_lock.c
@@ -161,9 +161,9 @@ static void share_mode_memcache_store(struct share_mode_data *d)
const DATA_BLOB key = memcache_key(&d->id);
struct file_id_buf idbuf;
- DBG_DEBUG("stored entry for file %s seq %"PRIx64" key %s\n",
+ DBG_DEBUG("stored entry for file %s epoch %"PRIx64" key %s\n",
d->base_name,
- d->sequence_number,
+ d->unique_content_epoch,
file_id_str_buf(d->id, &idbuf));
/* Ensure everything stored in the cache is pristine. */
@@ -193,13 +193,13 @@ static void share_mode_memcache_store(struct share_mode_data *d)
*/
static enum ndr_err_code get_share_mode_blob_header(
- const uint8_t *buf, size_t buflen, uint64_t *pseq, uint16_t *pflags)
+ const uint8_t *buf, size_t buflen, uint64_t *pepoch, uint16_t *pflags)
{
struct ndr_pull ndr = {
.data = discard_const_p(uint8_t, buf),
.data_size = buflen,
};
- NDR_CHECK(ndr_pull_hyper(&ndr, NDR_SCALARS, pseq));
+ NDR_CHECK(ndr_pull_hyper(&ndr, NDR_SCALARS, pepoch));
NDR_CHECK(ndr_pull_uint16(&ndr, NDR_SCALARS, pflags));
return NDR_ERR_SUCCESS;
}
@@ -278,7 +278,7 @@ static struct share_mode_data *share_mode_memcache_fetch(
{
enum ndr_err_code ndr_err;
struct share_mode_data *d;
- uint64_t sequence_number;
+ uint64_t unique_content_epoch;
uint16_t flags;
void *ptr;
struct file_id id;
@@ -303,7 +303,7 @@ static struct share_mode_data *share_mode_memcache_fetch(
}
/* sequence number key is at start of blob. */
ndr_err = get_share_mode_blob_header(
- buf, buflen, &sequence_number, &flags);
+ buf, buflen, &unique_content_epoch, &flags);
if (ndr_err != NDR_ERR_SUCCESS) {
/* Bad blob. Remove entry. */
DBG_DEBUG("bad blob %u key %s\n",
@@ -316,11 +316,11 @@ static struct share_mode_data *share_mode_memcache_fetch(
}
d = (struct share_mode_data *)ptr;
- if (d->sequence_number != sequence_number) {
- DBG_DEBUG("seq changed (cached %"PRIx64") (new %"PRIx64") "
+ if (d->unique_content_epoch != unique_content_epoch) {
+ DBG_DEBUG("epoch changed (cached %"PRIx64") (new %"PRIx64") "
"for key %s\n",
- d->sequence_number,
- sequence_number,
+ d->unique_content_epoch,
+ unique_content_epoch,
file_id_str_buf(id, &idbuf));
/* Cache out of date. Remove entry. */
memcache_delete(NULL,
@@ -346,9 +346,9 @@ static struct share_mode_data *share_mode_memcache_fetch(
/* And reset the destructor to none. */
talloc_set_destructor(d, NULL);
- DBG_DEBUG("fetched entry for file %s seq %"PRIx64" key %s\n",
+ DBG_DEBUG("fetched entry for file %s epoch %"PRIx64" key %s\n",
d->base_name,
- d->sequence_number,
+ d->unique_content_epoch,
file_id_str_buf(id, &idbuf));
return d;
@@ -662,7 +662,7 @@ static NTSTATUS share_mode_data_store(struct share_mode_data *d)
NDR_PRINT_DEBUG(share_mode_data, d);
}
- d->sequence_number += 1;
+ d->unique_content_epoch = generate_unique_u64(d->unique_content_epoch);
status = locking_tdb_data_fetch(key, d, <db);
if (!NT_STATUS_IS_OK(status)) {
@@ -714,8 +714,7 @@ static struct share_mode_data *fresh_share_mode_lock(
if (d == NULL) {
goto fail;
}
- /* New record - new sequence number. */
- generate_random_buffer((uint8_t *)&d->sequence_number, 8);
+ d->unique_content_epoch = generate_unique_u64(0);
d->base_name = talloc_strdup(d, smb_fname->base_name);
if (d->base_name == NULL) {
--
Samba Shared Repository
More information about the samba-cvs
mailing list