From: Frank Lahm Date: Wed, 19 Jan 2011 11:21:02 +0000 (+0100) Subject: Import tsocket from samba X-Git-Url: https://arthur.barton.de/gitweb/?a=commitdiff_plain;h=e1764ff62aa106683e17f8465d3600d5054c81ad;p=netatalk.git Import tsocket from samba --- diff --git a/configure.in b/configure.in index 96133212..326df037 100644 --- a/configure.in +++ b/configure.in @@ -1335,6 +1335,7 @@ AC_OUTPUT([Makefile libatalk/util/Makefile libatalk/talloc/Makefile libatalk/tevent/Makefile + libatalk/tsocket/Makefile libatalk/tdb/Makefile libatalk/unicode/Makefile libatalk/unicode/charsets/Makefile diff --git a/etc/netalockd/main.c b/etc/netalockd/main.c index 36ce95e2..ef02a319 100644 --- a/etc/netalockd/main.c +++ b/etc/netalockd/main.c @@ -17,9 +17,9 @@ #include #include -#include #include #include +#include static int reloadconfig; @@ -92,15 +92,15 @@ static void set_signal(void) int main(int ac, char **av) { - sigset_t sigs; - int ret; - - /* Log SIGBUS/SIGSEGV SBT */ - fault_setup(NULL); + struct tevent_context *event_ctx; + sigset_t sigs; + int ret; /* Default log setup: log to syslog */ + set_processname("netalockd"); setuplog("default log_note"); + /* Check lockfile and daemonize */ switch(server_lock("netalockd", _PATH_NETALOCKD_LOCK, 0)) { case -1: /* error */ exit(EXITERR_SYS); @@ -110,13 +110,22 @@ int main(int ac, char **av) exit(0); } + /* Setup signal stuff */ set_signal(); + /* Log SIGBUS/SIGSEGV SBT */ + fault_setup(NULL); - while (1) { - if (reloadconfig) { - reloadconfig = 0; - } + if ((event_ctx = tevent_context_init(talloc_autofree_context())) == NULL) { + LOG(log_error, logtype_default, "Error initializing event lib"); + exit(1); } + LOG(log_warning, logtype_default, "Running..."); + + /* wait for events - this is where we sit for most of our life */ + tevent_loop_wait(event_ctx); + + talloc_free(event_ctx); + return 0; } diff --git a/libatalk/Makefile.am b/libatalk/Makefile.am index 17311efb..4d5cdc03 100644 --- a/libatalk/Makefile.am +++ b/libatalk/Makefile.am @@ -1,7 +1,7 @@ # Makefile.am for libatalk/ -SUBDIRS = acl adouble bstring compat cnid dsi talloc tdb tevent util unicode vfs +SUBDIRS = acl adouble bstring compat cnid dsi talloc tdb tevent tsocket util unicode vfs lib_LTLIBRARIES = libatalk.la @@ -14,7 +14,10 @@ libatalk_la_LIBADD = \ compat/libcompat.la \ dsi/libdsi.la \ util/libutil.la \ + talloc/libtalloc.la \ tdb/libtdb.la \ + tevent/libtevent.la \ + tsocket/libtsocket.la \ unicode/libunicode.la \ vfs/libvfs.la @@ -25,7 +28,10 @@ libatalk_la_DEPENDENCIES = \ compat/libcompat.la \ dsi/libdsi.la \ util/libutil.la \ + talloc/libtalloc.la \ tdb/libtdb.la \ + tevent/libtevent.la \ + tsocket/libtsocket.la \ unicode/libunicode.la \ vfs/libvfs.la diff --git a/libatalk/tevent/tevent_signal.c b/libatalk/tevent/tevent_signal.c index ad754816..0b7b9f6b 100644 --- a/libatalk/tevent/tevent_signal.c +++ b/libatalk/tevent/tevent_signal.c @@ -24,6 +24,7 @@ */ #include +#include #include "tevent_internal.h" #include "tevent_util.h" diff --git a/libatalk/tevent/tevent_standard.c b/libatalk/tevent/tevent_standard.c index 67b904cf..c4063014 100644 --- a/libatalk/tevent/tevent_standard.c +++ b/libatalk/tevent/tevent_standard.c @@ -32,6 +32,7 @@ */ #include +#include #include "tevent_util.h" #include "tevent_internal.h" diff --git a/libatalk/tsocket/.gitignore b/libatalk/tsocket/.gitignore new file mode 100644 index 00000000..c38bb9ea --- /dev/null +++ b/libatalk/tsocket/.gitignore @@ -0,0 +1,7 @@ +Makefile +Makefile.in +*.lo +*.la +.deps +.libs +*.o diff --git a/libatalk/tsocket/Makefile.am 
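For reference, the rewritten netalockd main() above follows the standard tevent bootstrap: create an event context, register handlers, then hand control to tevent_loop_wait() until shutdown. A minimal sketch of that pattern, with the netatalk-specific lockfile and logging calls left out and error handling abbreviated:

    #include <talloc.h>
    #include <tevent.h>

    int main(void)
    {
        struct tevent_context *event_ctx;

        /* long-lived state hangs off the talloc autofree context */
        event_ctx = tevent_context_init(talloc_autofree_context());
        if (event_ctx == NULL) {
            return 1;
        }

        /* fd, timer and signal handlers would be registered here,
         * e.g. via tevent_add_fd() / tevent_add_signal() */

        /* sit in the event loop for the rest of the process lifetime */
        tevent_loop_wait(event_ctx);

        talloc_free(event_ctx);
        return 0;
    }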
b/libatalk/tsocket/Makefile.am new file mode 100644 index 00000000..35743152 --- /dev/null +++ b/libatalk/tsocket/Makefile.am @@ -0,0 +1,14 @@ +# Makefile.am for libatalk/tevent/ + +noinst_LTLIBRARIES = libtsocket.la + +AM_CFLAGS = + +libtsocket_la_SOURCES = \ + tsocket_bsd.c \ + tsocket.c \ + tsocket_helpers.c + +noinst_HEADERS = \ + tsocket.h \ + tsocket_internal.h diff --git a/libatalk/tsocket/tsocket.c b/libatalk/tsocket/tsocket.c new file mode 100644 index 00000000..b8dd6c89 --- /dev/null +++ b/libatalk/tsocket/tsocket.c @@ -0,0 +1,813 @@ +/* + Unix SMB/CIFS implementation. + + Copyright (C) Stefan Metzmacher 2009 + + ** NOTE! The following LGPL license applies to the tsocket + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see . +*/ + +#include "replace.h" +#include "system/filesys.h" +#include "tsocket.h" +#include "tsocket_internal.h" + +int tsocket_simple_int_recv(struct tevent_req *req, int *perrno) +{ + enum tevent_req_state state; + uint64_t error; + + if (!tevent_req_is_error(req, &state, &error)) { + return 0; + } + + switch (state) { + case TEVENT_REQ_NO_MEMORY: + *perrno = ENOMEM; + return -1; + case TEVENT_REQ_TIMED_OUT: + *perrno = ETIMEDOUT; + return -1; + case TEVENT_REQ_USER_ERROR: + *perrno = (int)error; + return -1; + default: + *perrno = EIO; + return -1; + } + + *perrno = EIO; + return -1; +} + +struct tsocket_address *_tsocket_address_create(TALLOC_CTX *mem_ctx, + const struct tsocket_address_ops *ops, + void *pstate, + size_t psize, + const char *type, + const char *location) +{ + void **ppstate = (void **)pstate; + struct tsocket_address *addr; + + addr = talloc_zero(mem_ctx, struct tsocket_address); + if (!addr) { + return NULL; + } + addr->ops = ops; + addr->location = location; + addr->private_data = talloc_size(addr, psize); + if (!addr->private_data) { + talloc_free(addr); + return NULL; + } + talloc_set_name_const(addr->private_data, type); + + *ppstate = addr->private_data; + return addr; +} + +char *tsocket_address_string(const struct tsocket_address *addr, + TALLOC_CTX *mem_ctx) +{ + if (!addr) { + return talloc_strdup(mem_ctx, "NULL"); + } + return addr->ops->string(addr, mem_ctx); +} + +struct tsocket_address *_tsocket_address_copy(const struct tsocket_address *addr, + TALLOC_CTX *mem_ctx, + const char *location) +{ + return addr->ops->copy(addr, mem_ctx, location); +} + +struct tdgram_context { + const char *location; + const struct tdgram_context_ops *ops; + void *private_data; + + struct tevent_req *recvfrom_req; + struct tevent_req *sendto_req; +}; + +static int tdgram_context_destructor(struct tdgram_context *dgram) +{ + if (dgram->recvfrom_req) { + tevent_req_received(dgram->recvfrom_req); + } + + if (dgram->sendto_req) { + tevent_req_received(dgram->sendto_req); + } + + return 0; +} + +struct tdgram_context *_tdgram_context_create(TALLOC_CTX *mem_ctx, + const struct tdgram_context_ops *ops, + void 
*pstate, + size_t psize, + const char *type, + const char *location) +{ + struct tdgram_context *dgram; + void **ppstate = (void **)pstate; + void *state; + + dgram = talloc(mem_ctx, struct tdgram_context); + if (dgram == NULL) { + return NULL; + } + dgram->location = location; + dgram->ops = ops; + dgram->recvfrom_req = NULL; + dgram->sendto_req = NULL; + + state = talloc_size(dgram, psize); + if (state == NULL) { + talloc_free(dgram); + return NULL; + } + talloc_set_name_const(state, type); + + dgram->private_data = state; + + talloc_set_destructor(dgram, tdgram_context_destructor); + + *ppstate = state; + return dgram; +} + +void *_tdgram_context_data(struct tdgram_context *dgram) +{ + return dgram->private_data; +} + +struct tdgram_recvfrom_state { + const struct tdgram_context_ops *ops; + struct tdgram_context *dgram; + uint8_t *buf; + size_t len; + struct tsocket_address *src; +}; + +static int tdgram_recvfrom_destructor(struct tdgram_recvfrom_state *state) +{ + if (state->dgram) { + state->dgram->recvfrom_req = NULL; + } + + return 0; +} + +static void tdgram_recvfrom_done(struct tevent_req *subreq); + +struct tevent_req *tdgram_recvfrom_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tdgram_context *dgram) +{ + struct tevent_req *req; + struct tdgram_recvfrom_state *state; + struct tevent_req *subreq; + + req = tevent_req_create(mem_ctx, &state, + struct tdgram_recvfrom_state); + if (req == NULL) { + return NULL; + } + + state->ops = dgram->ops; + state->dgram = dgram; + state->buf = NULL; + state->len = 0; + state->src = NULL; + + if (dgram->recvfrom_req) { + tevent_req_error(req, EBUSY); + goto post; + } + dgram->recvfrom_req = req; + + talloc_set_destructor(state, tdgram_recvfrom_destructor); + + subreq = state->ops->recvfrom_send(state, ev, dgram); + if (tevent_req_nomem(subreq, req)) { + goto post; + } + tevent_req_set_callback(subreq, tdgram_recvfrom_done, req); + + return req; + + post: + tevent_req_post(req, ev); + return req; +} + +static void tdgram_recvfrom_done(struct tevent_req *subreq) +{ + struct tevent_req *req = tevent_req_callback_data(subreq, + struct tevent_req); + struct tdgram_recvfrom_state *state = tevent_req_data(req, + struct tdgram_recvfrom_state); + ssize_t ret; + int sys_errno; + + ret = state->ops->recvfrom_recv(subreq, &sys_errno, state, + &state->buf, &state->src); + if (ret == -1) { + tevent_req_error(req, sys_errno); + return; + } + + state->len = ret; + + tevent_req_done(req); +} + +ssize_t tdgram_recvfrom_recv(struct tevent_req *req, + int *perrno, + TALLOC_CTX *mem_ctx, + uint8_t **buf, + struct tsocket_address **src) +{ + struct tdgram_recvfrom_state *state = tevent_req_data(req, + struct tdgram_recvfrom_state); + ssize_t ret; + + ret = tsocket_simple_int_recv(req, perrno); + if (ret == 0) { + *buf = talloc_move(mem_ctx, &state->buf); + ret = state->len; + if (src) { + *src = talloc_move(mem_ctx, &state->src); + } + } + + tevent_req_received(req); + return ret; +} + +struct tdgram_sendto_state { + const struct tdgram_context_ops *ops; + struct tdgram_context *dgram; + ssize_t ret; +}; + +static int tdgram_sendto_destructor(struct tdgram_sendto_state *state) +{ + if (state->dgram) { + state->dgram->sendto_req = NULL; + } + + return 0; +} + +static void tdgram_sendto_done(struct tevent_req *subreq); + +struct tevent_req *tdgram_sendto_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tdgram_context *dgram, + const uint8_t *buf, size_t len, + const struct tsocket_address *dst) +{ + struct tevent_req *req; + struct 
tdgram_sendto_state *state; + struct tevent_req *subreq; + + req = tevent_req_create(mem_ctx, &state, + struct tdgram_sendto_state); + if (req == NULL) { + return NULL; + } + + state->ops = dgram->ops; + state->dgram = dgram; + state->ret = -1; + + if (len == 0) { + tevent_req_error(req, EINVAL); + goto post; + } + + if (dgram->sendto_req) { + tevent_req_error(req, EBUSY); + goto post; + } + dgram->sendto_req = req; + + talloc_set_destructor(state, tdgram_sendto_destructor); + + subreq = state->ops->sendto_send(state, ev, dgram, + buf, len, dst); + if (tevent_req_nomem(subreq, req)) { + goto post; + } + tevent_req_set_callback(subreq, tdgram_sendto_done, req); + + return req; + + post: + tevent_req_post(req, ev); + return req; +} + +static void tdgram_sendto_done(struct tevent_req *subreq) +{ + struct tevent_req *req = tevent_req_callback_data(subreq, + struct tevent_req); + struct tdgram_sendto_state *state = tevent_req_data(req, + struct tdgram_sendto_state); + ssize_t ret; + int sys_errno; + + ret = state->ops->sendto_recv(subreq, &sys_errno); + if (ret == -1) { + tevent_req_error(req, sys_errno); + return; + } + + state->ret = ret; + + tevent_req_done(req); +} + +ssize_t tdgram_sendto_recv(struct tevent_req *req, + int *perrno) +{ + struct tdgram_sendto_state *state = tevent_req_data(req, + struct tdgram_sendto_state); + ssize_t ret; + + ret = tsocket_simple_int_recv(req, perrno); + if (ret == 0) { + ret = state->ret; + } + + tevent_req_received(req); + return ret; +} + +struct tdgram_disconnect_state { + const struct tdgram_context_ops *ops; +}; + +static void tdgram_disconnect_done(struct tevent_req *subreq); + +struct tevent_req *tdgram_disconnect_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tdgram_context *dgram) +{ + struct tevent_req *req; + struct tdgram_disconnect_state *state; + struct tevent_req *subreq; + + req = tevent_req_create(mem_ctx, &state, + struct tdgram_disconnect_state); + if (req == NULL) { + return NULL; + } + + state->ops = dgram->ops; + + if (dgram->recvfrom_req || dgram->sendto_req) { + tevent_req_error(req, EBUSY); + goto post; + } + + subreq = state->ops->disconnect_send(state, ev, dgram); + if (tevent_req_nomem(subreq, req)) { + goto post; + } + tevent_req_set_callback(subreq, tdgram_disconnect_done, req); + + return req; + + post: + tevent_req_post(req, ev); + return req; +} + +static void tdgram_disconnect_done(struct tevent_req *subreq) +{ + struct tevent_req *req = tevent_req_callback_data(subreq, + struct tevent_req); + struct tdgram_disconnect_state *state = tevent_req_data(req, + struct tdgram_disconnect_state); + int ret; + int sys_errno; + + ret = state->ops->disconnect_recv(subreq, &sys_errno); + if (ret == -1) { + tevent_req_error(req, sys_errno); + return; + } + + tevent_req_done(req); +} + +int tdgram_disconnect_recv(struct tevent_req *req, + int *perrno) +{ + int ret; + + ret = tsocket_simple_int_recv(req, perrno); + + tevent_req_received(req); + return ret; +} + +struct tstream_context { + const char *location; + const struct tstream_context_ops *ops; + void *private_data; + + struct tevent_req *readv_req; + struct tevent_req *writev_req; +}; + +static int tstream_context_destructor(struct tstream_context *stream) +{ + if (stream->readv_req) { + tevent_req_received(stream->readv_req); + } + + if (stream->writev_req) { + tevent_req_received(stream->writev_req); + } + + return 0; +} + +struct tstream_context *_tstream_context_create(TALLOC_CTX *mem_ctx, + const struct tstream_context_ops *ops, + void *pstate, + size_t psize, 
+ const char *type, + const char *location) +{ + struct tstream_context *stream; + void **ppstate = (void **)pstate; + void *state; + + stream = talloc(mem_ctx, struct tstream_context); + if (stream == NULL) { + return NULL; + } + stream->location = location; + stream->ops = ops; + stream->readv_req = NULL; + stream->writev_req = NULL; + + state = talloc_size(stream, psize); + if (state == NULL) { + talloc_free(stream); + return NULL; + } + talloc_set_name_const(state, type); + + stream->private_data = state; + + talloc_set_destructor(stream, tstream_context_destructor); + + *ppstate = state; + return stream; +} + +void *_tstream_context_data(struct tstream_context *stream) +{ + return stream->private_data; +} + +ssize_t tstream_pending_bytes(struct tstream_context *stream) +{ + return stream->ops->pending_bytes(stream); +} + +struct tstream_readv_state { + const struct tstream_context_ops *ops; + struct tstream_context *stream; + int ret; +}; + +static int tstream_readv_destructor(struct tstream_readv_state *state) +{ + if (state->stream) { + state->stream->readv_req = NULL; + } + + return 0; +} + +static void tstream_readv_done(struct tevent_req *subreq); + +struct tevent_req *tstream_readv_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream, + struct iovec *vector, + size_t count) +{ + struct tevent_req *req; + struct tstream_readv_state *state; + struct tevent_req *subreq; + int to_read = 0; + size_t i; + + req = tevent_req_create(mem_ctx, &state, + struct tstream_readv_state); + if (req == NULL) { + return NULL; + } + + state->ops = stream->ops; + state->stream = stream; + state->ret = -1; + + /* first check if the input is ok */ +#ifdef IOV_MAX + if (count > IOV_MAX) { + tevent_req_error(req, EMSGSIZE); + goto post; + } +#endif + + for (i=0; i < count; i++) { + int tmp = to_read; + tmp += vector[i].iov_len; + + if (tmp < to_read) { + tevent_req_error(req, EMSGSIZE); + goto post; + } + + to_read = tmp; + } + + if (to_read == 0) { + tevent_req_error(req, EINVAL); + goto post; + } + + if (stream->readv_req) { + tevent_req_error(req, EBUSY); + goto post; + } + stream->readv_req = req; + + talloc_set_destructor(state, tstream_readv_destructor); + + subreq = state->ops->readv_send(state, ev, stream, vector, count); + if (tevent_req_nomem(subreq, req)) { + goto post; + } + tevent_req_set_callback(subreq, tstream_readv_done, req); + + return req; + + post: + tevent_req_post(req, ev); + return req; +} + +static void tstream_readv_done(struct tevent_req *subreq) +{ + struct tevent_req *req = tevent_req_callback_data(subreq, + struct tevent_req); + struct tstream_readv_state *state = tevent_req_data(req, + struct tstream_readv_state); + ssize_t ret; + int sys_errno; + + ret = state->ops->readv_recv(subreq, &sys_errno); + TALLOC_FREE(subreq); + if (ret == -1) { + tevent_req_error(req, sys_errno); + return; + } + + state->ret = ret; + + tevent_req_done(req); +} + +int tstream_readv_recv(struct tevent_req *req, + int *perrno) +{ + struct tstream_readv_state *state = tevent_req_data(req, + struct tstream_readv_state); + int ret; + + ret = tsocket_simple_int_recv(req, perrno); + if (ret == 0) { + ret = state->ret; + } + + tevent_req_received(req); + return ret; +} + +struct tstream_writev_state { + const struct tstream_context_ops *ops; + struct tstream_context *stream; + int ret; +}; + +static int tstream_writev_destructor(struct tstream_writev_state *state) +{ + if (state->stream) { + state->stream->writev_req = NULL; + } + + return 0; +} + +static void 
tstream_writev_done(struct tevent_req *subreq); + +struct tevent_req *tstream_writev_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream, + const struct iovec *vector, + size_t count) +{ + struct tevent_req *req; + struct tstream_writev_state *state; + struct tevent_req *subreq; + int to_write = 0; + size_t i; + + req = tevent_req_create(mem_ctx, &state, + struct tstream_writev_state); + if (req == NULL) { + return NULL; + } + + state->ops = stream->ops; + state->stream = stream; + state->ret = -1; + + /* first check if the input is ok */ +#ifdef IOV_MAX + if (count > IOV_MAX) { + tevent_req_error(req, EMSGSIZE); + goto post; + } +#endif + + for (i=0; i < count; i++) { + int tmp = to_write; + tmp += vector[i].iov_len; + + if (tmp < to_write) { + tevent_req_error(req, EMSGSIZE); + goto post; + } + + to_write = tmp; + } + + if (to_write == 0) { + tevent_req_error(req, EINVAL); + goto post; + } + + if (stream->writev_req) { + tevent_req_error(req, EBUSY); + goto post; + } + stream->writev_req = req; + + talloc_set_destructor(state, tstream_writev_destructor); + + subreq = state->ops->writev_send(state, ev, stream, vector, count); + if (tevent_req_nomem(subreq, req)) { + goto post; + } + tevent_req_set_callback(subreq, tstream_writev_done, req); + + return req; + + post: + tevent_req_post(req, ev); + return req; +} + +static void tstream_writev_done(struct tevent_req *subreq) +{ + struct tevent_req *req = tevent_req_callback_data(subreq, + struct tevent_req); + struct tstream_writev_state *state = tevent_req_data(req, + struct tstream_writev_state); + ssize_t ret; + int sys_errno; + + ret = state->ops->writev_recv(subreq, &sys_errno); + if (ret == -1) { + tevent_req_error(req, sys_errno); + return; + } + + state->ret = ret; + + tevent_req_done(req); +} + +int tstream_writev_recv(struct tevent_req *req, + int *perrno) +{ + struct tstream_writev_state *state = tevent_req_data(req, + struct tstream_writev_state); + int ret; + + ret = tsocket_simple_int_recv(req, perrno); + if (ret == 0) { + ret = state->ret; + } + + tevent_req_received(req); + return ret; +} + +struct tstream_disconnect_state { + const struct tstream_context_ops *ops; +}; + +static void tstream_disconnect_done(struct tevent_req *subreq); + +struct tevent_req *tstream_disconnect_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream) +{ + struct tevent_req *req; + struct tstream_disconnect_state *state; + struct tevent_req *subreq; + + req = tevent_req_create(mem_ctx, &state, + struct tstream_disconnect_state); + if (req == NULL) { + return NULL; + } + + state->ops = stream->ops; + + if (stream->readv_req || stream->writev_req) { + tevent_req_error(req, EBUSY); + goto post; + } + + subreq = state->ops->disconnect_send(state, ev, stream); + if (tevent_req_nomem(subreq, req)) { + goto post; + } + tevent_req_set_callback(subreq, tstream_disconnect_done, req); + + return req; + + post: + tevent_req_post(req, ev); + return req; +} + +static void tstream_disconnect_done(struct tevent_req *subreq) +{ + struct tevent_req *req = tevent_req_callback_data(subreq, + struct tevent_req); + struct tstream_disconnect_state *state = tevent_req_data(req, + struct tstream_disconnect_state); + int ret; + int sys_errno; + + ret = state->ops->disconnect_recv(subreq, &sys_errno); + if (ret == -1) { + tevent_req_error(req, sys_errno); + return; + } + + tevent_req_done(req); +} + +int tstream_disconnect_recv(struct tevent_req *req, + int *perrno) +{ + int ret; + + ret = 
tsocket_simple_int_recv(req, perrno); + + tevent_req_received(req); + return ret; +} + diff --git a/libatalk/tsocket/tsocket.h b/libatalk/tsocket/tsocket.h new file mode 100644 index 00000000..d983325c --- /dev/null +++ b/libatalk/tsocket/tsocket.h @@ -0,0 +1,1071 @@ +/* + Unix SMB/CIFS implementation. + + Copyright (C) Stefan Metzmacher 2009 + + ** NOTE! The following LGPL license applies to the tsocket + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see . +*/ + +#ifndef _TSOCKET_H +#define _TSOCKET_H + +#include +#include + +struct tsocket_address; +struct tdgram_context; +struct tstream_context; +struct iovec; + +/** + * @mainpage + * + * The tsocket abstraction is an API ... + */ + +/** + * @defgroup tsocket The tsocket API + * + * The tsocket abstraction is split into two different kinds of + * communication interfaces. + * + * There's the "tstream_context" interface with abstracts the communication + * through a bidirectional byte stream between two endpoints. + * + * And there's the "tdgram_context" interface with abstracts datagram based + * communication between any number of endpoints. + * + * Both interfaces share the "tsocket_address" abstraction for endpoint + * addresses. + * + * The whole library is based on the talloc(3) and 'tevent' libraries and + * provides "tevent_req" based "foo_send()"/"foo_recv()" functions pairs for + * all abstracted methods that need to be async. + * + * @section vsock Virtual Sockets + * + * The abstracted layout of tdgram_context and tstream_context allow + * implementations around virtual sockets for encrypted tunnels (like TLS, + * SASL or GSSAPI) or named pipes over smb. + * + * @section npa Named Pipe Auth (NPA) Sockets + * + * Samba has an implementation to abstract named pipes over smb (within the + * server side). See libcli/named_pipe_auth/npa_tstream.[ch] for the core code. + * The current callers are located in source4/ntvfs/ipc/vfs_ipc.c and + * source4/rpc_server/service_rpc.c for the users. + */ + +/** + * @defgroup tsocket_address The tsocket_address abstraction + * @ingroup tsocket + * + * The tsocket_address represents an socket endpoint genericly. + * As it's like an abstract class it has no specific constructor. + * The specific constructors are descripted in later sections. + * + * @{ + */ + +/** + * @brief Get a string representaion of the endpoint. + * + * This function creates a string representation of the endpoint for debugging. + * The output will look as followed: + * prefix:address:port + * + * e.g. + * ipv4:192.168.1.1:143 + * + * Callers should not try to parse the string! The should use additional methods + * of the specific tsocket_address implemention to get more details. + * + * @param[in] addr The address to convert. + * + * @param[in] mem_ctx The talloc memory context to allocate the memory. + * + * @return The address as a string representation, NULL on error. 
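To illustrate the string format described above, here is a hedged sketch that builds an IPv4 endpoint with tsocket_address_inet_from_strings() (declared further down in this header) and prints it; mem_ctx is an assumed talloc context and error handling is abbreviated:

    struct tsocket_address *addr = NULL;
    char *str;

    if (tsocket_address_inet_from_strings(mem_ctx, "ipv4",
                                          "192.168.1.1", 143, &addr) != 0) {
        return -1;
    }

    /* per the format documented above this yields "ipv4:192.168.1.1:143" */
    str = tsocket_address_string(addr, mem_ctx);
    if (str == NULL) {
        return -1;
    }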
+ * + * @see tsocket_address_inet_addr_string() + * @see tsocket_address_inet_port() + */ +char *tsocket_address_string(const struct tsocket_address *addr, + TALLOC_CTX *mem_ctx); + +#ifdef DOXYGEN +/** + * @brief This creates a copy of a tsocket_address. + * + * This is useful when before doing modifications to a socket via additional + * methods of the specific tsocket_address implementation. + * + * @param[in] addr The address to create the copy from. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @return A newly allocated copy of addr (tsocket_address *), NULL + * on error. + */ +struct tsocket_address *tsocket_address_copy(const struct tsocket_address *addr, + TALLOC_CTX *mem_ctx); +#else +struct tsocket_address *_tsocket_address_copy(const struct tsocket_address *addr, + TALLOC_CTX *mem_ctx, + const char *location); + +#define tsocket_address_copy(addr, mem_ctx) \ + _tsocket_address_copy(addr, mem_ctx, __location__) +#endif + +/** + * @} + */ + +/** + * @defgroup tdgram_context The tdgram_context abstraction + * @ingroup tsocket + * + * The tdgram_context is like an abstract class for datagram based sockets. The + * interface provides async 'tevent_req' based functions on top functionality + * is similar to the recvfrom(2)/sendto(2)/close(2) syscalls. + * + * @note You can always use talloc_free(tdgram) to cleanup the resources + * of the tdgram_context on a fatal error. + * @{ + */ + +/** + * @brief Ask for next available datagram on the abstracted tdgram_context. + * + * It returns a 'tevent_req' handle, where the caller can register + * a callback with tevent_req_set_callback(). The callback is triggered + * when a datagram is available or an error happened. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] ev The tevent_context to run on. + * + * @param[in] dgram The dgram context to work on. + * + * @return Returns a 'tevent_req' handle, where the caller can + * register a callback with tevent_req_set_callback(). + * NULL on fatal error. + * + * @see tdgram_inet_udp_socket() + * @see tdgram_unix_socket() + */ +struct tevent_req *tdgram_recvfrom_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tdgram_context *dgram); + +/** + * @brief Receive the next available datagram on the abstracted tdgram_context. + * + * This function should be called by the callback when a datagram is available + * or an error happened. + * + * The caller can only have one outstanding tdgram_recvfrom_send() at a time + * otherwise the caller will get '*perrno = EBUSY'. + * + * @param[in] req The tevent request from tdgram_recvfrom_send(). + * + * @param[out] perrno The error number, set if an error occurred. + * + * @param[in] mem_ctx The memory context to use. + * + * @param[out] buf This will hold the buffer of the datagram. + * + * @param[out] src The abstracted tsocket_address of the sender of the + * received datagram. + * + * @return The length of the datagram (0 is never returned!), + * -1 on error with perrno set to the actual errno. + * + * @see tdgram_recvfrom_send() + */ +ssize_t tdgram_recvfrom_recv(struct tevent_req *req, + int *perrno, + TALLOC_CTX *mem_ctx, + uint8_t **buf, + struct tsocket_address **src); + +/** + * @brief Send a datagram to a destination endpoint. + * + * The function can be called to send a datagram (specified by a buf/len) to a + * destination endpoint (specified by dst). It's not allowed for len to be 0. 
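To make the recvfrom send/recv pairing above concrete, here is a hedged sketch of the usual calling pattern: issue the request, register a callback, and collect the datagram inside the callback with tdgram_recvfrom_recv(). The echo_state structure is illustrative; 'ev' is an assumed tevent context:

    struct echo_state {
        struct tdgram_context *dgram;
    };

    static void echo_recvfrom_done(struct tevent_req *req)
    {
        struct echo_state *state =
            tevent_req_callback_data(req, struct echo_state);
        uint8_t *buf = NULL;
        struct tsocket_address *src = NULL;
        ssize_t len;
        int sys_errno;

        len = tdgram_recvfrom_recv(req, &sys_errno, state, &buf, &src);
        talloc_free(req);
        if (len == -1) {
            /* sys_errno holds the reason, e.g. EBUSY if a recvfrom
             * request was already outstanding on this dgram */
            return;
        }

        /* 'len' bytes are in 'buf'; a reply could go back to 'src'
         * via tdgram_sendto_send() */
    }

    /* in setup code; 'state' allocated with talloc(mem_ctx, struct echo_state) */
    struct tevent_req *req;

    req = tdgram_recvfrom_send(state, ev, state->dgram);
    if (req == NULL) {
        /* fatal, e.g. out of memory */
    }
    tevent_req_set_callback(req, echo_recvfrom_done, state);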
+ * + * It returns a 'tevent_req' handle, where the caller can register a callback + * with tevent_req_set_callback(). The callback is triggered when the specific + * implementation (assumes it) has delivered the datagram to the "wire". + * + * The callback is then supposed to get the result by calling + * tdgram_sendto_recv() on the 'tevent_req'. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] ev The tevent_context to run on. + * + * @param[in] dgram The dgram context to work on. + * + * @param[in] buf The buffer to send. + * + * @param[in] len The length of the buffer to send. It has to be bigger + * than 0. + * + * @param[in] dst The destination to send the datagram to in form of a + * tsocket_address. + * + * @return Returns a 'tevent_req' handle, where the caller can + * register a callback with tevent_req_set_callback(). + * NULL on fatal error. + * + * @see tdgram_inet_udp_socket() + * @see tdgram_unix_socket() + * @see tdgram_sendto_recv() + */ +struct tevent_req *tdgram_sendto_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tdgram_context *dgram, + const uint8_t *buf, size_t len, + const struct tsocket_address *dst); + +/** + * @brief Receive the result of the sent datagram. + * + * The caller can only have one outstanding tdgram_sendto_send() at a time + * otherwise the caller will get '*perrno = EBUSY'. + * + * @param[in] req The tevent request from tdgram_sendto_send(). + * + * @param[out] perrno The error number, set if an error occurred. + * + * @return The length of the datagram (0 is never returned!), -1 on + * error with perrno set to the actual errno. + * + * @see tdgram_sendto_send() + */ +ssize_t tdgram_sendto_recv(struct tevent_req *req, + int *perrno); + +/** + * @brief Shutdown/close an abstracted socket. + * + * It returns a 'tevent_req' handle, where the caller can register a callback + * with tevent_req_set_callback(). The callback is triggered when the specific + * implementation (assumes it) has delivered the datagram to the "wire". + * + * The callback is then supposed to get the result by calling + * tdgram_sendto_recv() on the 'tevent_req'. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] ev The tevent_context to run on. + * + * @param[in] dgram The dgram context diconnect from. + * + * @return Returns a 'tevent_req' handle, where the caller can + * register a callback with tevent_req_set_callback(). + * NULL on fatal error. + * + * @see tdgram_disconnect_recv() + */ +struct tevent_req *tdgram_disconnect_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tdgram_context *dgram); + +/** + * @brief Receive the result from a tdgram_disconnect_send() request. + * + * The caller should make sure there're no outstanding tdgram_recvfrom_send() + * and tdgram_sendto_send() calls otherwise the caller will get + * '*perrno = EBUSY'. + * + * @param[in] req The tevent request from tdgram_disconnect_send(). + * + * @param[out] perrno The error number, set if an error occurred. + * + * @return The length of the datagram (0 is never returned!), -1 on + * error with perrno set to the actual errno. + * + * @see tdgram_disconnect_send() + */ +int tdgram_disconnect_recv(struct tevent_req *req, + int *perrno); + +/** + * @} + */ + +/** + * @defgroup tstream_context The tstream_context abstraction + * @ingroup tsocket + * + * The tstream_context is like an abstract class for stream based sockets. 
The + * interface provides async 'tevent_req' based functions on top functionality + * is similar to the readv(2)/writev(2)/close(2) syscalls. + * + * @note You can always use talloc_free(tstream) to cleanup the resources + * of the tstream_context on a fatal error. + * + * @{ + */ + +/** + * @brief Report the number of bytes received but not consumed yet. + * + * The tstream_pending_bytes() function reports how much bytes of the incoming + * stream have been received but not consumed yet. + * + * @param[in] stream The tstream_context to check for pending bytes. + * + * @return The number of bytes received, -1 on error with errno + * set. + */ +ssize_t tstream_pending_bytes(struct tstream_context *stream); + +/** + * @brief Read a specific amount of bytes from a stream socket. + * + * The function can be called to read for a specific amount of bytes from the + * stream into given buffers. The caller has to preallocate the buffers. + * + * The caller might need to use tstream_pending_bytes() if the protocol doesn't + * have a fixed pdu header containing the pdu size. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] ev The tevent_context to run on. + * + * @param[in] stream The tstream context to work on. + * + * @param[out] vector A preallocated iovec to store the data to read. + * + * @param[in] count The number of buffers in the vector allocated. + * + * @return A 'tevent_req' handle, where the caller can register + * a callback with tevent_req_set_callback(). NULL on + * fatal error. + * + * @see tstream_unix_connect_send() + * @see tstream_inet_tcp_connect_send() + */ +struct tevent_req *tstream_readv_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream, + struct iovec *vector, + size_t count); + +/** + * @brief Get the result of a tstream_readv_send(). + * + * The caller can only have one outstanding tstream_readv_send() + * at a time otherwise the caller will get *perrno = EBUSY. + * + * @param[in] req The tevent request from tstream_readv_send(). + * + * @param[out] perrno The error number, set if an error occurred. + * + * @return The length of the stream (0 is never returned!), -1 on + * error with perrno set to the actual errno. + */ +int tstream_readv_recv(struct tevent_req *req, + int *perrno); + +/** + * @brief Write buffers from a vector into a stream socket. + * + * The function can be called to write buffers from a given vector + * to a stream socket. + * + * You have to ensure that the vector is not empty. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] ev The tevent_context to run on. + * + * @param[in] stream The tstream context to work on. + * + * @param[in] vector The iovec vector with data to write on a stream socket. + * + * @param[in] count The number of buffers in the vector to write. + * + * @return A 'tevent_req' handle, where the caller can register + * a callback with tevent_req_set_callback(). NULL on + * fatal error. + */ +struct tevent_req *tstream_writev_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream, + const struct iovec *vector, + size_t count); + +/** + * @brief Get the result of a tstream_writev_send(). + * + * The caller can only have one outstanding tstream_writev_send() + * at a time otherwise the caller will get *perrno = EBUSY. + * + * @param[in] req The tevent request from tstream_writev_send(). + * + * @param[out] perrno The error number, set if an error occurred. 
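The readv/writev pairs above follow the same asynchronous scheme. A hedged sketch of a single write, assuming 'stream', 'ev' and 'mem_ctx' already exist; the buffer and iovec are kept at file scope so they stay valid until the callback has fired:

    /* buffer and vector outlive the request */
    static char hello[] = "hello\n";
    static struct iovec iov = {
        .iov_base = hello,
        .iov_len  = sizeof(hello) - 1,
    };

    static void write_done(struct tevent_req *req)
    {
        int sys_errno;
        int ret;

        ret = tstream_writev_recv(req, &sys_errno);
        talloc_free(req);
        if (ret == -1) {
            /* write failed, sys_errno holds the reason */
            return;
        }
        /* 'ret' bytes were written */
    }

    /* in setup code */
    struct tevent_req *req;

    req = tstream_writev_send(mem_ctx, ev, stream, &iov, 1);
    if (req == NULL) {
        /* fatal */
    }
    tevent_req_set_callback(req, write_done, NULL);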
+ * + * @return The length of the stream (0 is never returned!), -1 on + * error with perrno set to the actual errno. + */ +int tstream_writev_recv(struct tevent_req *req, + int *perrno); + +/** + * @brief Shutdown/close an abstracted socket. + * + * It returns a 'tevent_req' handle, where the caller can register a callback + * with tevent_req_set_callback(). The callback is triggered when the specific + * implementation (assumes it) has delivered the stream to the "wire". + * + * The callback is then supposed to get the result by calling + * tdgram_sendto_recv() on the 'tevent_req'. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] ev The tevent_context to run on. + * + * @param[in] stream The tstream context to work on. + * + * @return A 'tevent_req' handle, where the caller can register + * a callback with tevent_req_set_callback(). NULL on + * fatal error. + */ +struct tevent_req *tstream_disconnect_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream); + +/** + * @brief Get the result of a tstream_disconnect_send(). + * + * The caller can only have one outstanding tstream_writev_send() + * at a time otherwise the caller will get *perrno = EBUSY. + * + * @param[in] req The tevent request from tstream_disconnect_send(). + * + * @param[out] perrno The error number, set if an error occurred. + * + * @return The length of the stream (0 is never returned!), -1 on + * error with perrno set to the actual errno. + */ +int tstream_disconnect_recv(struct tevent_req *req, + int *perrno); + +/** + * @} + */ + + +/** + * @defgroup tsocket_bsd tsocket_bsd - inet, inet6 and unix + * @ingroup tsocket + * + * The main tsocket library comes with implentations for BSD style ipv4, ipv6 + * and unix sockets. + * + * @{ + */ + +#if DOXYGEN +/** + * @brief Create a tsocket_address for ipv4 and ipv6 endpoint addresses. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] fam The family can be can be "ipv4", "ipv6" or "ip". With + * "ip" is autodetects "ipv4" or "ipv6" based on the + * addr. + * + * @param[in] addr A valid ip address string based on the selected family + * (dns names are not allowed!). It's valid to pass NULL, + * which gets mapped to "0.0.0.0" or "::". + * + * @param[in] port A valid port number. + * + * @param[out] _addr A tsocket_address pointer to store the information. + * + * @return 0 on success, -1 on error with errno set. + */ +int tsocket_address_inet_from_strings(TALLOC_CTX *mem_ctx, + const char *fam, + const char *addr, + uint16_t port, + struct tsocket_address **_addr); +#else +int _tsocket_address_inet_from_strings(TALLOC_CTX *mem_ctx, + const char *fam, + const char *addr, + uint16_t port, + struct tsocket_address **_addr, + const char *location); + +#define tsocket_address_inet_from_strings(mem_ctx, fam, addr, port, _addr) \ + _tsocket_address_inet_from_strings(mem_ctx, fam, addr, port, _addr, \ + __location__) +#endif + +/** + * @brief Get the address of an 'inet' tsocket_address as a string. + * + * @param[in] addr The address to convert to a string. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @return A newly allocated string of the address, NULL on error + * with errno set. + */ +char *tsocket_address_inet_addr_string(const struct tsocket_address *addr, + TALLOC_CTX *mem_ctx); + +/** + * @brief Get the port number as an integer from an 'inet' tsocket_address. + * + * @param[in] addr The tsocket address to use. 
+ * + * @return The port number, 0 on error with errno set. + */ +uint16_t tsocket_address_inet_port(const struct tsocket_address *addr); + +/** + * @brief Set the port number of an existing 'inet' tsocket_address. + * + * @param[in] addr The existing tsocket_address to use. + * + * @param[in] port The valid port number to set. + * + * @return 0 on success, -1 on error with errno set. + */ +int tsocket_address_inet_set_port(struct tsocket_address *addr, + uint16_t port); + +#ifdef DOXYGEN +/** + * @brief Create a tsocket_address for a unix domain endpoint addresses. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] path The filesystem path, NULL will map "". + * + * @param[in] _addr The tsocket_address pointer to store the information. + * + * @return 0 on success, -1 on error with errno set. + */ +int tsocket_address_unix_from_path(TALLOC_CTX *mem_ctx, + const char *path, + struct tsocket_address **_addr); +#else +int _tsocket_address_unix_from_path(TALLOC_CTX *mem_ctx, + const char *path, + struct tsocket_address **_addr, + const char *location); + +#define tsocket_address_unix_from_path(mem_ctx, path, _addr) \ + _tsocket_address_unix_from_path(mem_ctx, path, _addr, \ + __location__) +#endif + +/** + * @brief Get the address of an 'unix' tsocket_address. + * + * @param[in] addr A valid 'unix' tsocket_address. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @return The path of the unix domain socket, NULL on error or if + * the tsocket_address doesn't represent an unix domain + * endpoint path. + */ +char *tsocket_address_unix_path(const struct tsocket_address *addr, + TALLOC_CTX *mem_ctx); + +#ifdef DOXYGEN +/** + * @brief Create a tdgram_context for a ipv4 or ipv6 UDP communication. + * + * @param[in] local An 'inet' tsocket_address for the local endpoint. + * + * @param[in] remote An 'inet' tsocket_address for the remote endpoint or + * NULL (??? to create a listener?). + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] dgram The tdgram_context pointer to setup the udp + * communication. The function will allocate the memory. + * + * @return 0 on success, -1 on error with errno set. + */ +int tdgram_inet_udp_socket(const struct tsocket_address *local, + const struct tsocket_address *remote, + TALLOC_CTX *mem_ctx, + struct tdgram_context **dgram); +#else +int _tdgram_inet_udp_socket(const struct tsocket_address *local, + const struct tsocket_address *remote, + TALLOC_CTX *mem_ctx, + struct tdgram_context **dgram, + const char *location); +#define tdgram_inet_udp_socket(local, remote, mem_ctx, dgram) \ + _tdgram_inet_udp_socket(local, remote, mem_ctx, dgram, __location__) +#endif + +#ifdef DOXYGEN +/** + * @brief Create a tdgram_context for unix domain datagram communication. + * + * @param[in] local An 'unix' tsocket_address for the local endpoint. + * + * @param[in] remote An 'unix' tsocket_address for the remote endpoint or + * NULL (??? to create a listener?). + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] dgram The tdgram_context pointer to setup the udp + * communication. The function will allocate the memory. + * + * @return 0 on success, -1 on error with errno set. 
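Putting the inet address constructor and the datagram constructor together, here is a hedged sketch of opening a UDP socket bound to all IPv4 interfaces; the port number is made up and 'mem_ctx' is an assumed talloc context:

    struct tsocket_address *local = NULL;
    struct tdgram_context *dgram = NULL;

    /* addr == NULL maps to "0.0.0.0" for the "ipv4" family */
    if (tsocket_address_inet_from_strings(mem_ctx, "ipv4",
                                          NULL, 4700, &local) != 0) {
        return -1;
    }

    /* remote == NULL: the socket is not connected to a fixed peer */
    if (tdgram_inet_udp_socket(local, NULL, mem_ctx, &dgram) != 0) {
        return -1;
    }

    /* datagrams are then exchanged with tdgram_recvfrom_send()/_recv()
     * and tdgram_sendto_send()/_recv() */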
+ */ +int tdgram_unix_socket(const struct tsocket_address *local, + const struct tsocket_address *remote, + TALLOC_CTX *mem_ctx, + struct tdgram_context **dgram); +#else +int _tdgram_unix_socket(const struct tsocket_address *local, + const struct tsocket_address *remote, + TALLOC_CTX *mem_ctx, + struct tdgram_context **dgram, + const char *location); + +#define tdgram_unix_socket(local, remote, mem_ctx, dgram) \ + _tdgram_unix_socket(local, remote, mem_ctx, dgram, __location__) +#endif + +/** + * @brief Connect async to a TCP endpoint and create a tstream_context for the + * stream based communication. + * + * Use this function to connenct asynchronously to a remote ipv4 or ipv6 TCP + * endpoint and create a tstream_context for the stream based communication. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] ev The tevent_context to run on. + * + * @param[in] local An 'inet' tsocket_address for the local endpoint. + * + * @param[in] remote An 'inet' tsocket_address for the remote endpoint. + * + * @return A 'tevent_req' handle, where the caller can register a + * callback with tevent_req_set_callback(). NULL on a fatal + * error. + * + * @see tstream_inet_tcp_connect_recv() + */ +struct tevent_req *tstream_inet_tcp_connect_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + const struct tsocket_address *local, + const struct tsocket_address *remote); + +#ifdef DOXYGEN +/** + * @brief Receive the result from a tstream_inet_tcp_connect_send(). + * + * @param[in] req The tevent request from tstream_inet_tcp_connect_send(). + * + * @param[out] perrno The error number, set if an error occurred. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] stream A tstream_context pointer to setup the tcp communication + * on. This function will allocate the memory. + * + * @return 0 on success, -1 on error with perrno set. + */ +int tstream_inet_tcp_connect_recv(struct tevent_req *req, + int *perrno, + TALLOC_CTX *mem_ctx, + struct tstream_context **stream); +#else +int _tstream_inet_tcp_connect_recv(struct tevent_req *req, + int *perrno, + TALLOC_CTX *mem_ctx, + struct tstream_context **stream, + const char *location); +#define tstream_inet_tcp_connect_recv(req, perrno, mem_ctx, stream) \ + _tstream_inet_tcp_connect_recv(req, perrno, mem_ctx, stream, \ + __location__) +#endif + +/** + * @brief Connect async to a unix domain endpoint and create a tstream_context + * for the stream based communication. + * + * Use this function to connenct asynchronously to a unix domainendpoint and + * create a tstream_context for the stream based communication. + * + * The callback is triggered when a socket is connected and ready for IO or an + * error happened. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] ev The tevent_context to run on. + * + * @param[in] local An 'unix' tsocket_address for the local endpoint. + * + * @param[in] remote An 'unix' tsocket_address for the remote endpoint. + * + * @return A 'tevent_req' handle, where the caller can register a + * callback with tevent_req_set_callback(). NULL on a falal + * error. + * + * @see tstream_unix_connect_recv() + */ +struct tevent_req * tstream_unix_connect_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + const struct tsocket_address *local, + const struct tsocket_address *remote); + +#ifdef DOXYGEN +/** + * @brief Receive the result from a tstream_unix_connect_send(). + * + * @param[in] req The tevent request from tstream_inet_tcp_connect_send(). 
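The connect calls follow the same send/recv scheme as the IO calls: fire tstream_inet_tcp_connect_send() and pick up the connected tstream_context in the callback. A hedged sketch; the conn_state structure is illustrative, and 'local'/'remote' are assumed to have been created with tsocket_address_inet_from_strings():

    struct conn_state {
        struct tstream_context *stream;
    };

    static void connect_done(struct tevent_req *req)
    {
        struct conn_state *state =
            tevent_req_callback_data(req, struct conn_state);
        int sys_errno;
        int ret;

        ret = tstream_inet_tcp_connect_recv(req, &sys_errno, state,
                                            &state->stream);
        talloc_free(req);
        if (ret == -1) {
            /* connect failed, sys_errno holds the reason */
            return;
        }

        /* the stream is ready for tstream_readv_send()/tstream_writev_send() */
    }

    /* in setup code; 'state' allocated with talloc(mem_ctx, struct conn_state),
     * 'local' a wildcard address (e.g. port 0), 'remote' the peer address */
    struct tevent_req *req;

    req = tstream_inet_tcp_connect_send(state, ev, local, remote);
    if (req == NULL) {
        /* fatal */
    }
    tevent_req_set_callback(req, connect_done, state);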
+ * + * @param[out] perrno The error number, set if an error occurred. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] stream The tstream context to work on. + * + * @return 0 on success, -1 on error with perrno set. + */ +int tstream_unix_connect_recv(struct tevent_req *req, + int *perrno, + TALLOC_CTX *mem_ctx, + struct tstream_context **stream); +#else +int _tstream_unix_connect_recv(struct tevent_req *req, + int *perrno, + TALLOC_CTX *mem_ctx, + struct tstream_context **stream, + const char *location); +#define tstream_unix_connect_recv(req, perrno, mem_ctx, stream) \ + _tstream_unix_connect_recv(req, perrno, mem_ctx, stream, \ + __location__) +#endif + +#ifdef DOXYGEN +/** + * @brief Create two connected 'unix' tsocket_contexts for stream based + * communication. + * + * @param[in] mem_ctx1 The talloc memory context to use for stream1. + * + * @param[in] stream1 The first stream to connect. + * + * @param[in] mem_ctx2 The talloc memory context to use for stream2. + * + * @param[in] stream2 The second stream to connect. + * + * @return 0 on success, -1 on error with errno set. + */ +int tstream_unix_socketpair(TALLOC_CTX *mem_ctx1, + struct tstream_context **stream1, + TALLOC_CTX *mem_ctx2, + struct tstream_context **stream2); +#else +int _tstream_unix_socketpair(TALLOC_CTX *mem_ctx1, + struct tstream_context **_stream1, + TALLOC_CTX *mem_ctx2, + struct tstream_context **_stream2, + const char *location); + +#define tstream_unix_socketpair(mem_ctx1, stream1, mem_ctx2, stream2) \ + _tstream_unix_socketpair(mem_ctx1, stream1, mem_ctx2, stream2, \ + __location__) +#endif + +struct sockaddr; + +#ifdef DOXYGEN +/** + * @brief Convert a tsocket address to a bsd socket address. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] sa The sockaddr structure to convert. + * + * @param[in] sa_socklen The lenth of the sockaddr sturucte. + * + * @param[out] addr The tsocket pointer to allocate and fill. + * + * @return 0 on success, -1 on error with errno set. + */ +int tsocket_address_bsd_from_sockaddr(TALLOC_CTX *mem_ctx, + struct sockaddr *sa, + size_t sa_socklen, + struct tsocket_address **addr); +#else +int _tsocket_address_bsd_from_sockaddr(TALLOC_CTX *mem_ctx, + struct sockaddr *sa, + size_t sa_socklen, + struct tsocket_address **_addr, + const char *location); + +#define tsocket_address_bsd_from_sockaddr(mem_ctx, sa, sa_socklen, _addr) \ + _tsocket_address_bsd_from_sockaddr(mem_ctx, sa, sa_socklen, _addr, \ + __location__) +#endif + +/** + * @brief Fill a bsd sockaddr structure. + * + * @param[in] addr The tsocket address structure to use. + * + * @param[in] sa The bsd sockaddr structure to fill out. + * + * @param[in] sa_socklen The length of the bsd sockaddr structure to fill out. + * + * @return The actual size of the sockaddr structure, -1 on error + * with errno set. The size could differ from sa_socklen. + * + * @code + * ssize_t socklen; + * struct sockaddr_storage ss; + * + * socklen = tsocket_address_bsd_sockaddr(taddr, + * (struct sockaddr *) &ss, + * sizeof(struct sockaddr_storage)); + * if (socklen < 0) { + * return -1; + * } + * @endcode + */ +ssize_t tsocket_address_bsd_sockaddr(const struct tsocket_address *addr, + struct sockaddr *sa, + size_t sa_socklen); + +#ifdef DOXYGEN +/** + * @brief Wrap an existing file descriptors into the tstream abstraction. + * + * You can use this function to wrap an existing file descriptors into the + * tstream abstraction. 
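tsocket_address_bsd_from_sockaddr() is the natural way to turn the peer address filled in by accept(2) into a tsocket_address. A hedged sketch; 'listen_fd' and 'mem_ctx' are assumed to exist:

    struct sockaddr_storage ss;
    socklen_t ss_len = sizeof(ss);
    struct tsocket_address *peer = NULL;
    int fd;

    fd = accept(listen_fd, (struct sockaddr *)&ss, &ss_len);
    if (fd == -1) {
        return -1;
    }

    if (tsocket_address_bsd_from_sockaddr(mem_ctx,
                                          (struct sockaddr *)&ss,
                                          ss_len, &peer) != 0) {
        close(fd);
        return -1;
    }

    /* the accepted fd itself can then be handed to
     * tstream_bsd_existing_socket() (documented just below),
     * after it has been switched to non-blocking mode */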
After that you're not able to use this file descriptor + * for anything else. The file descriptor will be closed when the stream gets + * freed. If you still want to use the fd you have have to create a duplicate. + * + * @param[in] mem_ctx The talloc memory context to use. + * + * @param[in] fd The non blocking fd to use! + * + * @param[in] stream The filed tstream_context you allocated before. + * + * @return 0 on success, -1 on error with errno set. + * + * @warning You should read the tsocket_bsd.c code and unterstand it in order + * use this function. + */ +int tstream_bsd_existing_socket(TALLOC_CTX *mem_ctx, + int fd, + struct tstream_context **stream); +#else +int _tstream_bsd_existing_socket(TALLOC_CTX *mem_ctx, + int fd, + struct tstream_context **_stream, + const char *location); +#define tstream_bsd_existing_socket(mem_ctx, fd, stream) \ + _tstream_bsd_existing_socket(mem_ctx, fd, stream, \ + __location__) +#endif + +/** + * @} + */ + +/** + * @defgroup tsocket_helper Queue and PDU helpers + * @ingroup tsocket + * + * In order to make the live easier for callers which want to implement a + * function to receive a full PDU with a single async function pair, there're + * some helper functions. + * + * There're some cases where the caller wants doesn't care about the order of + * doing IO on the abstracted sockets. + * + * @{ + */ + +/** + * @brief Queue a dgram blob for sending through the socket. + * + * This function queues a blob for sending to destination through an existing + * dgram socket. The async callback is triggered when the whole blob is + * delivered to the underlying system socket. + * + * The caller needs to make sure that all non-scalar input parameters hang + * around for the whole lifetime of the request. + * + * @param[in] mem_ctx The memory context for the result. + * + * @param[in] ev The event context the operation should work on. + * + * @param[in] dgram The tdgram_context to send the message buffer. + * + * @param[in] queue The existing dgram queue. + * + * @param[in] buf The message buffer to send. + * + * @param[in] len The message length. + * + * @param[in] dst The destination socket address. + * + * @return The async request handle. NULL on fatal error. + * + * @see tdgram_sendto_queue_recv() + */ +struct tevent_req *tdgram_sendto_queue_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tdgram_context *dgram, + struct tevent_queue *queue, + const uint8_t *buf, + size_t len, + struct tsocket_address *dst); + +/** + * @brief Receive the result of the sent dgram blob. + * + * @param[in] req The tevent request from tdgram_sendto_queue_send(). + * + * @param[out] perrno The error set to the actual errno. + * + * @return The length of the datagram (0 is never returned!), -1 on + * error with perrno set to the actual errno. + */ +ssize_t tdgram_sendto_queue_recv(struct tevent_req *req, int *perrno); + +typedef int (*tstream_readv_pdu_next_vector_t)(struct tstream_context *stream, + void *private_data, + TALLOC_CTX *mem_ctx, + struct iovec **vector, + size_t *count); + +struct tevent_req *tstream_readv_pdu_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream, + tstream_readv_pdu_next_vector_t next_vector_fn, + void *next_vector_private); +int tstream_readv_pdu_recv(struct tevent_req *req, int *perrno); + +/** + * @brief Queue a read request for a PDU on the socket. + * + * This function queues a read request for a PDU on a stream socket. 
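The tstream_readv_pdu_* helpers above drive a caller-supplied next-vector callback repeatedly until it hands back an empty vector, which signals that the full PDU has been read. A hedged sketch of such a callback for a protocol with a 4-byte big-endian length header; the pdu_state structure and the two-phase logic are illustrative, and a real implementation would sanity-check the announced length:

    struct pdu_state {
        uint8_t hdr[4];
        uint8_t *payload;
        uint32_t payload_len;
        bool hdr_done;
    };

    static int pdu_next_vector(struct tstream_context *stream,
                               void *private_data,
                               TALLOC_CTX *mem_ctx,
                               struct iovec **_vector,
                               size_t *_count)
    {
        struct pdu_state *pdu = (struct pdu_state *)private_data;
        struct iovec *vector;

        if (pdu->hdr_done && pdu->payload != NULL) {
            /* both phases done: an empty vector ends the PDU */
            *_vector = NULL;
            *_count = 0;
            return 0;
        }

        vector = talloc_array(mem_ctx, struct iovec, 1);
        if (vector == NULL) {
            return -1;
        }

        if (!pdu->hdr_done) {
            /* phase 1: read the 4 byte length header */
            pdu->hdr_done = true;
            vector[0].iov_base = pdu->hdr;
            vector[0].iov_len = sizeof(pdu->hdr);
        } else {
            /* phase 2: read the payload announced by the header */
            pdu->payload_len = ((uint32_t)pdu->hdr[0] << 24) |
                               ((uint32_t)pdu->hdr[1] << 16) |
                               ((uint32_t)pdu->hdr[2] << 8) |
                                (uint32_t)pdu->hdr[3];
            pdu->payload = talloc_array(pdu, uint8_t, pdu->payload_len);
            if (pdu->payload == NULL) {
                return -1;
            }
            vector[0].iov_base = pdu->payload;
            vector[0].iov_len = pdu->payload_len;
        }

        *_vector = vector;
        *_count = 1;
        return 0;
    }

The callback plus a pdu_state instance are then passed to tstream_readv_pdu_send() (or the queue variant described below) as next_vector_fn and next_vector_private.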
The async + * callback is triggered when a full PDU has been read from the socket. + * + * The caller needs to make sure that all non-scalar input parameters hang + * around for the whole lifetime of the request. + * + * @param[in] mem_ctx The memory context for the result + * + * @param[in] ev The tevent_context to run on + * + * @param[in] stream The stream to send data through + * + * @param[in] queue The existing send queue + * + * @param[in] next_vector_fn The next vector function + * + * @param[in] next_vector_private The private_data of the next vector function + * + * @return The async request handle. NULL on fatal error. + * + * @see tstream_readv_pdu_queue_recv() + */ +struct tevent_req *tstream_readv_pdu_queue_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream, + struct tevent_queue *queue, + tstream_readv_pdu_next_vector_t next_vector_fn, + void *next_vector_private); + +/** + * @brief Receive the PDU blob read from the stream. + * + * @param[in] req The tevent request from tstream_readv_pdu_queue_send(). + * + * @param[out] perrno The error set to the actual errno. + * + * @return The number of bytes read on success, -1 on error with + * perrno set to the actual errno. + */ +int tstream_readv_pdu_queue_recv(struct tevent_req *req, int *perrno); + +/** + * @brief Queue an iovector for sending through the socket + * + * This function queues an iovector for sending to destination through an + * existing stream socket. The async callback is triggered when the whole + * vectror has been delivered to the underlying system socket. + * + * The caller needs to make sure that all non-scalar input parameters hang + * around for the whole lifetime of the request. + * + * @param[in] mem_ctx The memory context for the result. + * + * @param[in] ev The tevent_context to run on. + * + * @param[in] stream The stream to send data through. + * + * @param[in] queue The existing send queue. + * + * @param[in] vector The iovec vector so write. + * + * @param[in] count The size of the vector. + * + * @return The async request handle. NULL on fatal error. + */ +struct tevent_req *tstream_writev_queue_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream, + struct tevent_queue *queue, + const struct iovec *vector, + size_t count); + +/** + * @brief Receive the result of the sent iovector. + * + * @param[in] req The tevent request from tstream_writev_queue_send(). + * + * @param[out] perrno The error set to the actual errno. + * + * @return The length of the iovector (0 is never returned!), -1 on + * error with perrno set to the actual errno. + */ +int tstream_writev_queue_recv(struct tevent_req *req, int *perrno); + +/** + * @} + */ + +#endif /* _TSOCKET_H */ + diff --git a/libatalk/tsocket/tsocket_bsd.c b/libatalk/tsocket/tsocket_bsd.c new file mode 100644 index 00000000..43defb30 --- /dev/null +++ b/libatalk/tsocket/tsocket_bsd.c @@ -0,0 +1,2304 @@ +/* + Unix SMB/CIFS implementation. + + Copyright (C) Stefan Metzmacher 2009 + + ** NOTE! The following LGPL license applies to the tsocket + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. 
+ + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see . +*/ + +#include "replace.h" +#include "system/filesys.h" +#include "system/network.h" +#include "tsocket.h" +#include "tsocket_internal.h" + +static int tsocket_bsd_error_from_errno(int ret, + int sys_errno, + bool *retry) +{ + *retry = false; + + if (ret >= 0) { + return 0; + } + + if (ret != -1) { + return EIO; + } + + if (sys_errno == 0) { + return EIO; + } + + if (sys_errno == EINTR) { + *retry = true; + return sys_errno; + } + + if (sys_errno == EINPROGRESS) { + *retry = true; + return sys_errno; + } + + if (sys_errno == EAGAIN) { + *retry = true; + return sys_errno; + } + +#ifdef EWOULDBLOCK + if (sys_errno == EWOULDBLOCK) { + *retry = true; + return sys_errno; + } +#endif + + return sys_errno; +} + +static int tsocket_bsd_common_prepare_fd(int fd, bool high_fd) +{ + int i; + int sys_errno = 0; + int fds[3]; + int num_fds = 0; + + int result, flags; + + if (fd == -1) { + return -1; + } + + /* first make a fd >= 3 */ + if (high_fd) { + while (fd < 3) { + fds[num_fds++] = fd; + fd = dup(fd); + if (fd == -1) { + sys_errno = errno; + break; + } + } + for (i=0; i= 0) { + flags |= FD_CLOEXEC; + result = fcntl(fd, F_SETFD, flags); + } + if (result < 0) { + goto fail; + } +#endif + return fd; + + fail: + if (fd != -1) { + sys_errno = errno; + close(fd); + errno = sys_errno; + } + return -1; +} + +static ssize_t tsocket_bsd_pending(int fd) +{ + int ret, error; + int value = 0; + socklen_t len; + + ret = ioctl(fd, FIONREAD, &value); + if (ret == -1) { + return ret; + } + + if (ret != 0) { + /* this should not be reached */ + errno = EIO; + return -1; + } + + if (value != 0) { + return value; + } + + error = 0; + len = sizeof(error); + + /* + * if no data is available check if the socket is in error state. For + * dgram sockets it's the way to return ICMP error messages of + * connected sockets to the caller. 
+ */ + ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, &error, &len); + if (ret == -1) { + return ret; + } + if (error != 0) { + errno = error; + return -1; + } + return 0; +} + +static const struct tsocket_address_ops tsocket_address_bsd_ops; + +struct tsocket_address_bsd { + socklen_t sa_socklen; + union { + struct sockaddr sa; + struct sockaddr_in in; +#ifdef HAVE_IPV6 + struct sockaddr_in6 in6; +#endif + struct sockaddr_un un; + struct sockaddr_storage ss; + } u; +}; + +int _tsocket_address_bsd_from_sockaddr(TALLOC_CTX *mem_ctx, + struct sockaddr *sa, + size_t sa_socklen, + struct tsocket_address **_addr, + const char *location) +{ + struct tsocket_address *addr; + struct tsocket_address_bsd *bsda; + + if (sa_socklen < sizeof(sa->sa_family)) { + errno = EINVAL; + return -1; + } + + switch (sa->sa_family) { + case AF_UNIX: + if (sa_socklen > sizeof(struct sockaddr_un)) { + sa_socklen = sizeof(struct sockaddr_un); + } + break; + case AF_INET: + if (sa_socklen < sizeof(struct sockaddr_in)) { + errno = EINVAL; + return -1; + } + sa_socklen = sizeof(struct sockaddr_in); + break; +#ifdef HAVE_IPV6 + case AF_INET6: + if (sa_socklen < sizeof(struct sockaddr_in6)) { + errno = EINVAL; + return -1; + } + sa_socklen = sizeof(struct sockaddr_in6); + break; +#endif + default: + errno = EAFNOSUPPORT; + return -1; + } + + if (sa_socklen > sizeof(struct sockaddr_storage)) { + errno = EINVAL; + return -1; + } + + addr = tsocket_address_create(mem_ctx, + &tsocket_address_bsd_ops, + &bsda, + struct tsocket_address_bsd, + location); + if (!addr) { + errno = ENOMEM; + return -1; + } + + ZERO_STRUCTP(bsda); + + memcpy(&bsda->u.ss, sa, sa_socklen); + + bsda->sa_socklen = sa_socklen; + + *_addr = addr; + return 0; +} + +ssize_t tsocket_address_bsd_sockaddr(const struct tsocket_address *addr, + struct sockaddr *sa, + size_t sa_socklen) +{ + struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data, + struct tsocket_address_bsd); + + if (!bsda) { + errno = EINVAL; + return -1; + } + + if (sa_socklen < bsda->sa_socklen) { + errno = EINVAL; + return -1; + } + + if (sa_socklen > bsda->sa_socklen) { + memset(sa, 0, sa_socklen); + sa_socklen = bsda->sa_socklen; + } + + memcpy(sa, &bsda->u.ss, sa_socklen); + return sa_socklen; +} + +int _tsocket_address_inet_from_strings(TALLOC_CTX *mem_ctx, + const char *fam, + const char *addr, + uint16_t port, + struct tsocket_address **_addr, + const char *location) +{ + struct addrinfo hints; + struct addrinfo *result = NULL; + char port_str[6]; + int ret; + + ZERO_STRUCT(hints); + /* + * we use SOCKET_STREAM here to get just one result + * back from getaddrinfo(). 
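+ * The SOCK_STREAM hint is only a filter for the lookup; the sockaddr
+ * returned by getaddrinfo() is copied into the tsocket_address and is
+ * later used for both stream (TCP) and datagram (UDP) sockets.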
+ */ + hints.ai_socktype = SOCK_STREAM; + hints.ai_flags = AI_NUMERICHOST | AI_NUMERICSERV; + + if (strcasecmp(fam, "ip") == 0) { + hints.ai_family = AF_UNSPEC; + if (!addr) { +#ifdef HAVE_IPV6 + addr = "::"; +#else + addr = "0.0.0.0"; +#endif + } + } else if (strcasecmp(fam, "ipv4") == 0) { + hints.ai_family = AF_INET; + if (!addr) { + addr = "0.0.0.0"; + } +#ifdef HAVE_IPV6 + } else if (strcasecmp(fam, "ipv6") == 0) { + hints.ai_family = AF_INET6; + if (!addr) { + addr = "::"; + } +#endif + } else { + errno = EAFNOSUPPORT; + return -1; + } + + snprintf(port_str, sizeof(port_str) - 1, "%u", port); + + ret = getaddrinfo(addr, port_str, &hints, &result); + if (ret != 0) { + switch (ret) { + case EAI_FAIL: + errno = EINVAL; + break; + } + ret = -1; + goto done; + } + + if (result->ai_socktype != SOCK_STREAM) { + errno = EINVAL; + ret = -1; + goto done; + } + + ret = _tsocket_address_bsd_from_sockaddr(mem_ctx, + result->ai_addr, + result->ai_addrlen, + _addr, + location); + +done: + if (result) { + freeaddrinfo(result); + } + return ret; +} + +char *tsocket_address_inet_addr_string(const struct tsocket_address *addr, + TALLOC_CTX *mem_ctx) +{ + struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data, + struct tsocket_address_bsd); + char addr_str[INET6_ADDRSTRLEN+1]; + const char *str; + + if (!bsda) { + errno = EINVAL; + return NULL; + } + + switch (bsda->u.sa.sa_family) { + case AF_INET: + str = inet_ntop(bsda->u.in.sin_family, + &bsda->u.in.sin_addr, + addr_str, sizeof(addr_str)); + break; +#ifdef HAVE_IPV6 + case AF_INET6: + str = inet_ntop(bsda->u.in6.sin6_family, + &bsda->u.in6.sin6_addr, + addr_str, sizeof(addr_str)); + break; +#endif + default: + errno = EINVAL; + return NULL; + } + + if (!str) { + return NULL; + } + + return talloc_strdup(mem_ctx, str); +} + +uint16_t tsocket_address_inet_port(const struct tsocket_address *addr) +{ + struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data, + struct tsocket_address_bsd); + uint16_t port = 0; + + if (!bsda) { + errno = EINVAL; + return 0; + } + + switch (bsda->u.sa.sa_family) { + case AF_INET: + port = ntohs(bsda->u.in.sin_port); + break; +#ifdef HAVE_IPV6 + case AF_INET6: + port = ntohs(bsda->u.in6.sin6_port); + break; +#endif + default: + errno = EINVAL; + return 0; + } + + return port; +} + +int tsocket_address_inet_set_port(struct tsocket_address *addr, + uint16_t port) +{ + struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data, + struct tsocket_address_bsd); + + if (!bsda) { + errno = EINVAL; + return -1; + } + + switch (bsda->u.sa.sa_family) { + case AF_INET: + bsda->u.in.sin_port = htons(port); + break; +#ifdef HAVE_IPV6 + case AF_INET6: + bsda->u.in6.sin6_port = htons(port); + break; +#endif + default: + errno = EINVAL; + return -1; + } + + return 0; +} + +int _tsocket_address_unix_from_path(TALLOC_CTX *mem_ctx, + const char *path, + struct tsocket_address **_addr, + const char *location) +{ + struct sockaddr_un un; + void *p = &un; + int ret; + + if (!path) { + path = ""; + } + + if (strlen(path) > sizeof(un.sun_path)-1) { + errno = ENAMETOOLONG; + return -1; + } + + ZERO_STRUCT(un); + un.sun_family = AF_UNIX; + strncpy(un.sun_path, path, sizeof(un.sun_path)-1); + + ret = _tsocket_address_bsd_from_sockaddr(mem_ctx, + (struct sockaddr *)p, + sizeof(un), + _addr, + location); + + return ret; +} + +char *tsocket_address_unix_path(const struct tsocket_address *addr, + TALLOC_CTX *mem_ctx) +{ + struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data, + struct 
tsocket_address_bsd); + const char *str; + + if (!bsda) { + errno = EINVAL; + return NULL; + } + + switch (bsda->u.sa.sa_family) { + case AF_UNIX: + str = bsda->u.un.sun_path; + break; + default: + errno = EINVAL; + return NULL; + } + + return talloc_strdup(mem_ctx, str); +} + +static char *tsocket_address_bsd_string(const struct tsocket_address *addr, + TALLOC_CTX *mem_ctx) +{ + struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data, + struct tsocket_address_bsd); + char *str; + char *addr_str; + const char *prefix = NULL; + uint16_t port; + + switch (bsda->u.sa.sa_family) { + case AF_UNIX: + return talloc_asprintf(mem_ctx, "unix:%s", + bsda->u.un.sun_path); + case AF_INET: + prefix = "ipv4"; + break; +#ifdef HAVE_IPV6 + case AF_INET6: + prefix = "ipv6"; + break; +#endif + default: + errno = EINVAL; + return NULL; + } + + addr_str = tsocket_address_inet_addr_string(addr, mem_ctx); + if (!addr_str) { + return NULL; + } + + port = tsocket_address_inet_port(addr); + + str = talloc_asprintf(mem_ctx, "%s:%s:%u", + prefix, addr_str, port); + talloc_free(addr_str); + + return str; +} + +static struct tsocket_address *tsocket_address_bsd_copy(const struct tsocket_address *addr, + TALLOC_CTX *mem_ctx, + const char *location) +{ + struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data, + struct tsocket_address_bsd); + struct tsocket_address *copy; + int ret; + + ret = _tsocket_address_bsd_from_sockaddr(mem_ctx, + &bsda->u.sa, + bsda->sa_socklen, + ©, + location); + if (ret != 0) { + return NULL; + } + + return copy; +} + +static const struct tsocket_address_ops tsocket_address_bsd_ops = { + .name = "bsd", + .string = tsocket_address_bsd_string, + .copy = tsocket_address_bsd_copy, +}; + +struct tdgram_bsd { + int fd; + + void *event_ptr; + struct tevent_fd *fde; + + void *readable_private; + void (*readable_handler)(void *private_data); + void *writeable_private; + void (*writeable_handler)(void *private_data); +}; + +static void tdgram_bsd_fde_handler(struct tevent_context *ev, + struct tevent_fd *fde, + uint16_t flags, + void *private_data) +{ + struct tdgram_bsd *bsds = talloc_get_type_abort(private_data, + struct tdgram_bsd); + + if (flags & TEVENT_FD_WRITE) { + bsds->writeable_handler(bsds->writeable_private); + return; + } + if (flags & TEVENT_FD_READ) { + if (!bsds->readable_handler) { + TEVENT_FD_NOT_READABLE(bsds->fde); + return; + } + bsds->readable_handler(bsds->readable_private); + return; + } +} + +static int tdgram_bsd_set_readable_handler(struct tdgram_bsd *bsds, + struct tevent_context *ev, + void (*handler)(void *private_data), + void *private_data) +{ + if (ev == NULL) { + if (handler) { + errno = EINVAL; + return -1; + } + if (!bsds->readable_handler) { + return 0; + } + bsds->readable_handler = NULL; + bsds->readable_private = NULL; + + return 0; + } + + /* read and write must use the same tevent_context */ + if (bsds->event_ptr != ev) { + if (bsds->readable_handler || bsds->writeable_handler) { + errno = EINVAL; + return -1; + } + bsds->event_ptr = NULL; + TALLOC_FREE(bsds->fde); + } + + if (tevent_fd_get_flags(bsds->fde) == 0) { + TALLOC_FREE(bsds->fde); + + bsds->fde = tevent_add_fd(ev, bsds, + bsds->fd, TEVENT_FD_READ, + tdgram_bsd_fde_handler, + bsds); + if (!bsds->fde) { + errno = ENOMEM; + return -1; + } + + /* cache the event context we're running on */ + bsds->event_ptr = ev; + } else if (!bsds->readable_handler) { + TEVENT_FD_READABLE(bsds->fde); + } + + bsds->readable_handler = handler; + bsds->readable_private = private_data; + + return 
0; +} + +static int tdgram_bsd_set_writeable_handler(struct tdgram_bsd *bsds, + struct tevent_context *ev, + void (*handler)(void *private_data), + void *private_data) +{ + if (ev == NULL) { + if (handler) { + errno = EINVAL; + return -1; + } + if (!bsds->writeable_handler) { + return 0; + } + bsds->writeable_handler = NULL; + bsds->writeable_private = NULL; + TEVENT_FD_NOT_WRITEABLE(bsds->fde); + + return 0; + } + + /* read and write must use the same tevent_context */ + if (bsds->event_ptr != ev) { + if (bsds->readable_handler || bsds->writeable_handler) { + errno = EINVAL; + return -1; + } + bsds->event_ptr = NULL; + TALLOC_FREE(bsds->fde); + } + + if (tevent_fd_get_flags(bsds->fde) == 0) { + TALLOC_FREE(bsds->fde); + + bsds->fde = tevent_add_fd(ev, bsds, + bsds->fd, TEVENT_FD_WRITE, + tdgram_bsd_fde_handler, + bsds); + if (!bsds->fde) { + errno = ENOMEM; + return -1; + } + + /* cache the event context we're running on */ + bsds->event_ptr = ev; + } else if (!bsds->writeable_handler) { + TEVENT_FD_WRITEABLE(bsds->fde); + } + + bsds->writeable_handler = handler; + bsds->writeable_private = private_data; + + return 0; +} + +struct tdgram_bsd_recvfrom_state { + struct tdgram_context *dgram; + + uint8_t *buf; + size_t len; + struct tsocket_address *src; +}; + +static int tdgram_bsd_recvfrom_destructor(struct tdgram_bsd_recvfrom_state *state) +{ + struct tdgram_bsd *bsds = tdgram_context_data(state->dgram, + struct tdgram_bsd); + + tdgram_bsd_set_readable_handler(bsds, NULL, NULL, NULL); + + return 0; +} + +static void tdgram_bsd_recvfrom_handler(void *private_data); + +static struct tevent_req *tdgram_bsd_recvfrom_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tdgram_context *dgram) +{ + struct tevent_req *req; + struct tdgram_bsd_recvfrom_state *state; + struct tdgram_bsd *bsds = tdgram_context_data(dgram, struct tdgram_bsd); + int ret; + + req = tevent_req_create(mem_ctx, &state, + struct tdgram_bsd_recvfrom_state); + if (!req) { + return NULL; + } + + state->dgram = dgram; + state->buf = NULL; + state->len = 0; + state->src = NULL; + + talloc_set_destructor(state, tdgram_bsd_recvfrom_destructor); + + if (bsds->fd == -1) { + tevent_req_error(req, ENOTCONN); + goto post; + } + + /* + * this is a fast path, not waiting for the + * socket to become explicit readable gains + * about 10%-20% performance in benchmark tests. 
+ */ + tdgram_bsd_recvfrom_handler(req); + if (!tevent_req_is_in_progress(req)) { + goto post; + } + + ret = tdgram_bsd_set_readable_handler(bsds, ev, + tdgram_bsd_recvfrom_handler, + req); + if (ret == -1) { + tevent_req_error(req, errno); + goto post; + } + + return req; + + post: + tevent_req_post(req, ev); + return req; +} + +static void tdgram_bsd_recvfrom_handler(void *private_data) +{ + struct tevent_req *req = talloc_get_type_abort(private_data, + struct tevent_req); + struct tdgram_bsd_recvfrom_state *state = tevent_req_data(req, + struct tdgram_bsd_recvfrom_state); + struct tdgram_context *dgram = state->dgram; + struct tdgram_bsd *bsds = tdgram_context_data(dgram, struct tdgram_bsd); + struct tsocket_address_bsd *bsda; + ssize_t ret; + int err; + bool retry; + + ret = tsocket_bsd_pending(bsds->fd); + if (ret == 0) { + /* retry later */ + return; + } + err = tsocket_bsd_error_from_errno(ret, errno, &retry); + if (retry) { + /* retry later */ + return; + } + if (tevent_req_error(req, err)) { + return; + } + + state->buf = talloc_array(state, uint8_t, ret); + if (tevent_req_nomem(state->buf, req)) { + return; + } + state->len = ret; + + state->src = tsocket_address_create(state, + &tsocket_address_bsd_ops, + &bsda, + struct tsocket_address_bsd, + __location__ "bsd_recvfrom"); + if (tevent_req_nomem(state->src, req)) { + return; + } + + ZERO_STRUCTP(bsda); + bsda->sa_socklen = sizeof(bsda->u.ss); + + ret = recvfrom(bsds->fd, state->buf, state->len, 0, + &bsda->u.sa, &bsda->sa_socklen); + err = tsocket_bsd_error_from_errno(ret, errno, &retry); + if (retry) { + /* retry later */ + return; + } + if (tevent_req_error(req, err)) { + return; + } + + /* + * Some systems (FreeBSD, see bug #7115) return too much + * bytes in tsocket_bsd_pending()/ioctl(fd, FIONREAD, ...), + * the return value includes some IP/UDP header bytes, + * while recvfrom() just returns the payload. 
+ */ + state->buf = talloc_realloc(state, state->buf, uint8_t, ret); + if (tevent_req_nomem(state->buf, req)) { + return; + } + state->len = ret; + + tevent_req_done(req); +} + +static ssize_t tdgram_bsd_recvfrom_recv(struct tevent_req *req, + int *perrno, + TALLOC_CTX *mem_ctx, + uint8_t **buf, + struct tsocket_address **src) +{ + struct tdgram_bsd_recvfrom_state *state = tevent_req_data(req, + struct tdgram_bsd_recvfrom_state); + ssize_t ret; + + ret = tsocket_simple_int_recv(req, perrno); + if (ret == 0) { + *buf = talloc_move(mem_ctx, &state->buf); + ret = state->len; + if (src) { + *src = talloc_move(mem_ctx, &state->src); + } + } + + tevent_req_received(req); + return ret; +} + +struct tdgram_bsd_sendto_state { + struct tdgram_context *dgram; + + const uint8_t *buf; + size_t len; + const struct tsocket_address *dst; + + ssize_t ret; +}; + +static int tdgram_bsd_sendto_destructor(struct tdgram_bsd_sendto_state *state) +{ + struct tdgram_bsd *bsds = tdgram_context_data(state->dgram, + struct tdgram_bsd); + + tdgram_bsd_set_writeable_handler(bsds, NULL, NULL, NULL); + + return 0; +} + +static void tdgram_bsd_sendto_handler(void *private_data); + +static struct tevent_req *tdgram_bsd_sendto_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tdgram_context *dgram, + const uint8_t *buf, + size_t len, + const struct tsocket_address *dst) +{ + struct tevent_req *req; + struct tdgram_bsd_sendto_state *state; + struct tdgram_bsd *bsds = tdgram_context_data(dgram, struct tdgram_bsd); + int ret; + + req = tevent_req_create(mem_ctx, &state, + struct tdgram_bsd_sendto_state); + if (!req) { + return NULL; + } + + state->dgram = dgram; + state->buf = buf; + state->len = len; + state->dst = dst; + state->ret = -1; + + talloc_set_destructor(state, tdgram_bsd_sendto_destructor); + + if (bsds->fd == -1) { + tevent_req_error(req, ENOTCONN); + goto post; + } + + /* + * this is a fast path, not waiting for the + * socket to become explicit writeable gains + * about 10%-20% performance in benchmark tests. 
+ */ + tdgram_bsd_sendto_handler(req); + if (!tevent_req_is_in_progress(req)) { + goto post; + } + + ret = tdgram_bsd_set_writeable_handler(bsds, ev, + tdgram_bsd_sendto_handler, + req); + if (ret == -1) { + tevent_req_error(req, errno); + goto post; + } + + return req; + + post: + tevent_req_post(req, ev); + return req; +} + +static void tdgram_bsd_sendto_handler(void *private_data) +{ + struct tevent_req *req = talloc_get_type_abort(private_data, + struct tevent_req); + struct tdgram_bsd_sendto_state *state = tevent_req_data(req, + struct tdgram_bsd_sendto_state); + struct tdgram_context *dgram = state->dgram; + struct tdgram_bsd *bsds = tdgram_context_data(dgram, struct tdgram_bsd); + struct sockaddr *sa = NULL; + socklen_t sa_socklen = 0; + ssize_t ret; + int err; + bool retry; + + if (state->dst) { + struct tsocket_address_bsd *bsda = + talloc_get_type(state->dst->private_data, + struct tsocket_address_bsd); + + sa = &bsda->u.sa; + sa_socklen = bsda->sa_socklen; + } + + ret = sendto(bsds->fd, state->buf, state->len, 0, sa, sa_socklen); + err = tsocket_bsd_error_from_errno(ret, errno, &retry); + if (retry) { + /* retry later */ + return; + } + if (tevent_req_error(req, err)) { + return; + } + + state->ret = ret; + + tevent_req_done(req); +} + +static ssize_t tdgram_bsd_sendto_recv(struct tevent_req *req, int *perrno) +{ + struct tdgram_bsd_sendto_state *state = tevent_req_data(req, + struct tdgram_bsd_sendto_state); + ssize_t ret; + + ret = tsocket_simple_int_recv(req, perrno); + if (ret == 0) { + ret = state->ret; + } + + tevent_req_received(req); + return ret; +} + +struct tdgram_bsd_disconnect_state { + uint8_t __dummy; +}; + +static struct tevent_req *tdgram_bsd_disconnect_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tdgram_context *dgram) +{ + struct tdgram_bsd *bsds = tdgram_context_data(dgram, struct tdgram_bsd); + struct tevent_req *req; + struct tdgram_bsd_disconnect_state *state; + int ret; + int err; + bool dummy; + + req = tevent_req_create(mem_ctx, &state, + struct tdgram_bsd_disconnect_state); + if (req == NULL) { + return NULL; + } + + if (bsds->fd == -1) { + tevent_req_error(req, ENOTCONN); + goto post; + } + + ret = close(bsds->fd); + bsds->fd = -1; + err = tsocket_bsd_error_from_errno(ret, errno, &dummy); + if (tevent_req_error(req, err)) { + goto post; + } + + tevent_req_done(req); +post: + tevent_req_post(req, ev); + return req; +} + +static int tdgram_bsd_disconnect_recv(struct tevent_req *req, + int *perrno) +{ + int ret; + + ret = tsocket_simple_int_recv(req, perrno); + + tevent_req_received(req); + return ret; +} + +static const struct tdgram_context_ops tdgram_bsd_ops = { + .name = "bsd", + + .recvfrom_send = tdgram_bsd_recvfrom_send, + .recvfrom_recv = tdgram_bsd_recvfrom_recv, + + .sendto_send = tdgram_bsd_sendto_send, + .sendto_recv = tdgram_bsd_sendto_recv, + + .disconnect_send = tdgram_bsd_disconnect_send, + .disconnect_recv = tdgram_bsd_disconnect_recv, +}; + +static int tdgram_bsd_destructor(struct tdgram_bsd *bsds) +{ + TALLOC_FREE(bsds->fde); + if (bsds->fd != -1) { + close(bsds->fd); + bsds->fd = -1; + } + return 0; +} + +static int tdgram_bsd_dgram_socket(const struct tsocket_address *local, + const struct tsocket_address *remote, + bool broadcast, + TALLOC_CTX *mem_ctx, + struct tdgram_context **_dgram, + const char *location) +{ + struct tsocket_address_bsd *lbsda = + talloc_get_type_abort(local->private_data, + struct tsocket_address_bsd); + struct tsocket_address_bsd *rbsda = NULL; + struct tdgram_context *dgram; + struct 
tdgram_bsd *bsds; + int fd; + int ret; + bool do_bind = false; + bool do_reuseaddr = false; + bool do_ipv6only = false; + bool is_inet = false; + int sa_fam = lbsda->u.sa.sa_family; + + if (remote) { + rbsda = talloc_get_type_abort(remote->private_data, + struct tsocket_address_bsd); + } + + switch (lbsda->u.sa.sa_family) { + case AF_UNIX: + if (broadcast) { + errno = EINVAL; + return -1; + } + if (lbsda->u.un.sun_path[0] != 0) { + do_reuseaddr = true; + do_bind = true; + } + break; + case AF_INET: + if (lbsda->u.in.sin_port != 0) { + do_reuseaddr = true; + do_bind = true; + } + if (lbsda->u.in.sin_addr.s_addr != INADDR_ANY) { + do_bind = true; + } + is_inet = true; + break; +#ifdef HAVE_IPV6 + case AF_INET6: + if (lbsda->u.in6.sin6_port != 0) { + do_reuseaddr = true; + do_bind = true; + } + if (memcmp(&in6addr_any, + &lbsda->u.in6.sin6_addr, + sizeof(in6addr_any)) != 0) { + do_bind = true; + } + is_inet = true; + do_ipv6only = true; + break; +#endif + default: + errno = EINVAL; + return -1; + } + + if (!do_bind && is_inet && rbsda) { + sa_fam = rbsda->u.sa.sa_family; + switch (sa_fam) { + case AF_INET: + do_ipv6only = false; + break; +#ifdef HAVE_IPV6 + case AF_INET6: + do_ipv6only = true; + break; +#endif + } + } + + fd = socket(sa_fam, SOCK_DGRAM, 0); + if (fd < 0) { + return fd; + } + + fd = tsocket_bsd_common_prepare_fd(fd, true); + if (fd < 0) { + return fd; + } + + dgram = tdgram_context_create(mem_ctx, + &tdgram_bsd_ops, + &bsds, + struct tdgram_bsd, + location); + if (!dgram) { + int saved_errno = errno; + close(fd); + errno = saved_errno; + return -1; + } + ZERO_STRUCTP(bsds); + bsds->fd = fd; + talloc_set_destructor(bsds, tdgram_bsd_destructor); + +#ifdef HAVE_IPV6 + if (do_ipv6only) { + int val = 1; + + ret = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, + (const void *)&val, sizeof(val)); + if (ret == -1) { + int saved_errno = errno; + talloc_free(dgram); + errno = saved_errno; + return ret; + } + } +#endif + + if (broadcast) { + int val = 1; + + ret = setsockopt(fd, SOL_SOCKET, SO_BROADCAST, + (const void *)&val, sizeof(val)); + if (ret == -1) { + int saved_errno = errno; + talloc_free(dgram); + errno = saved_errno; + return ret; + } + } + + if (do_reuseaddr) { + int val = 1; + + ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, + (const void *)&val, sizeof(val)); + if (ret == -1) { + int saved_errno = errno; + talloc_free(dgram); + errno = saved_errno; + return ret; + } + } + + if (do_bind) { + ret = bind(fd, &lbsda->u.sa, lbsda->sa_socklen); + if (ret == -1) { + int saved_errno = errno; + talloc_free(dgram); + errno = saved_errno; + return ret; + } + } + + if (rbsda) { + if (rbsda->u.sa.sa_family != sa_fam) { + talloc_free(dgram); + errno = EINVAL; + return -1; + } + + ret = connect(fd, &rbsda->u.sa, rbsda->sa_socklen); + if (ret == -1) { + int saved_errno = errno; + talloc_free(dgram); + errno = saved_errno; + return ret; + } + } + + *_dgram = dgram; + return 0; +} + +int _tdgram_inet_udp_socket(const struct tsocket_address *local, + const struct tsocket_address *remote, + TALLOC_CTX *mem_ctx, + struct tdgram_context **dgram, + const char *location) +{ + struct tsocket_address_bsd *lbsda = + talloc_get_type_abort(local->private_data, + struct tsocket_address_bsd); + int ret; + + switch (lbsda->u.sa.sa_family) { + case AF_INET: + break; +#ifdef HAVE_IPV6 + case AF_INET6: + break; +#endif + default: + errno = EINVAL; + return -1; + } + + ret = tdgram_bsd_dgram_socket(local, remote, false, + mem_ctx, dgram, location); + + return ret; +} + +int _tdgram_unix_socket(const struct 
tsocket_address *local, + const struct tsocket_address *remote, + TALLOC_CTX *mem_ctx, + struct tdgram_context **dgram, + const char *location) +{ + struct tsocket_address_bsd *lbsda = + talloc_get_type_abort(local->private_data, + struct tsocket_address_bsd); + int ret; + + switch (lbsda->u.sa.sa_family) { + case AF_UNIX: + break; + default: + errno = EINVAL; + return -1; + } + + ret = tdgram_bsd_dgram_socket(local, remote, false, + mem_ctx, dgram, location); + + return ret; +} + +struct tstream_bsd { + int fd; + + void *event_ptr; + struct tevent_fd *fde; + + void *readable_private; + void (*readable_handler)(void *private_data); + void *writeable_private; + void (*writeable_handler)(void *private_data); +}; + +static void tstream_bsd_fde_handler(struct tevent_context *ev, + struct tevent_fd *fde, + uint16_t flags, + void *private_data) +{ + struct tstream_bsd *bsds = talloc_get_type_abort(private_data, + struct tstream_bsd); + + if (flags & TEVENT_FD_WRITE) { + bsds->writeable_handler(bsds->writeable_private); + return; + } + if (flags & TEVENT_FD_READ) { + if (!bsds->readable_handler) { + if (bsds->writeable_handler) { + bsds->writeable_handler(bsds->writeable_private); + return; + } + TEVENT_FD_NOT_READABLE(bsds->fde); + return; + } + bsds->readable_handler(bsds->readable_private); + return; + } +} + +static int tstream_bsd_set_readable_handler(struct tstream_bsd *bsds, + struct tevent_context *ev, + void (*handler)(void *private_data), + void *private_data) +{ + if (ev == NULL) { + if (handler) { + errno = EINVAL; + return -1; + } + if (!bsds->readable_handler) { + return 0; + } + bsds->readable_handler = NULL; + bsds->readable_private = NULL; + + return 0; + } + + /* read and write must use the same tevent_context */ + if (bsds->event_ptr != ev) { + if (bsds->readable_handler || bsds->writeable_handler) { + errno = EINVAL; + return -1; + } + bsds->event_ptr = NULL; + TALLOC_FREE(bsds->fde); + } + + if (tevent_fd_get_flags(bsds->fde) == 0) { + TALLOC_FREE(bsds->fde); + + bsds->fde = tevent_add_fd(ev, bsds, + bsds->fd, TEVENT_FD_READ, + tstream_bsd_fde_handler, + bsds); + if (!bsds->fde) { + errno = ENOMEM; + return -1; + } + + /* cache the event context we're running on */ + bsds->event_ptr = ev; + } else if (!bsds->readable_handler) { + TEVENT_FD_READABLE(bsds->fde); + } + + bsds->readable_handler = handler; + bsds->readable_private = private_data; + + return 0; +} + +static int tstream_bsd_set_writeable_handler(struct tstream_bsd *bsds, + struct tevent_context *ev, + void (*handler)(void *private_data), + void *private_data) +{ + if (ev == NULL) { + if (handler) { + errno = EINVAL; + return -1; + } + if (!bsds->writeable_handler) { + return 0; + } + bsds->writeable_handler = NULL; + bsds->writeable_private = NULL; + TEVENT_FD_NOT_WRITEABLE(bsds->fde); + + return 0; + } + + /* read and write must use the same tevent_context */ + if (bsds->event_ptr != ev) { + if (bsds->readable_handler || bsds->writeable_handler) { + errno = EINVAL; + return -1; + } + bsds->event_ptr = NULL; + TALLOC_FREE(bsds->fde); + } + + if (tevent_fd_get_flags(bsds->fde) == 0) { + TALLOC_FREE(bsds->fde); + + bsds->fde = tevent_add_fd(ev, bsds, + bsds->fd, + TEVENT_FD_READ | TEVENT_FD_WRITE, + tstream_bsd_fde_handler, + bsds); + if (!bsds->fde) { + errno = ENOMEM; + return -1; + } + + /* cache the event context we're running on */ + bsds->event_ptr = ev; + } else if (!bsds->writeable_handler) { + uint16_t flags = tevent_fd_get_flags(bsds->fde); + flags |= TEVENT_FD_READ | TEVENT_FD_WRITE; + 
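+ /*
+  * TEVENT_FD_READ stays in the mask on purpose: tstream_bsd_fde_handler()
+  * falls back to the writeable handler when the fd becomes readable and no
+  * readable handler is installed, so a peer close or socket error wakes up
+  * a pending writer instead of leaving it blocked.
+  */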
tevent_fd_set_flags(bsds->fde, flags); + } + + bsds->writeable_handler = handler; + bsds->writeable_private = private_data; + + return 0; +} + +static ssize_t tstream_bsd_pending_bytes(struct tstream_context *stream) +{ + struct tstream_bsd *bsds = tstream_context_data(stream, + struct tstream_bsd); + ssize_t ret; + + if (bsds->fd == -1) { + errno = ENOTCONN; + return -1; + } + + ret = tsocket_bsd_pending(bsds->fd); + + return ret; +} + +struct tstream_bsd_readv_state { + struct tstream_context *stream; + + struct iovec *vector; + size_t count; + + int ret; +}; + +static int tstream_bsd_readv_destructor(struct tstream_bsd_readv_state *state) +{ + struct tstream_bsd *bsds = tstream_context_data(state->stream, + struct tstream_bsd); + + tstream_bsd_set_readable_handler(bsds, NULL, NULL, NULL); + + return 0; +} + +static void tstream_bsd_readv_handler(void *private_data); + +static struct tevent_req *tstream_bsd_readv_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream, + struct iovec *vector, + size_t count) +{ + struct tevent_req *req; + struct tstream_bsd_readv_state *state; + struct tstream_bsd *bsds = tstream_context_data(stream, struct tstream_bsd); + int ret; + + req = tevent_req_create(mem_ctx, &state, + struct tstream_bsd_readv_state); + if (!req) { + return NULL; + } + + state->stream = stream; + /* we make a copy of the vector so that we can modify it */ + state->vector = talloc_array(state, struct iovec, count); + if (tevent_req_nomem(state->vector, req)) { + goto post; + } + memcpy(state->vector, vector, sizeof(struct iovec)*count); + state->count = count; + state->ret = 0; + + talloc_set_destructor(state, tstream_bsd_readv_destructor); + + if (bsds->fd == -1) { + tevent_req_error(req, ENOTCONN); + goto post; + } + + /* + * this is a fast path, not waiting for the + * socket to become explicit readable gains + * about 10%-20% performance in benchmark tests. 
+ */ + tstream_bsd_readv_handler(req); + if (!tevent_req_is_in_progress(req)) { + goto post; + } + + ret = tstream_bsd_set_readable_handler(bsds, ev, + tstream_bsd_readv_handler, + req); + if (ret == -1) { + tevent_req_error(req, errno); + goto post; + } + + return req; + + post: + tevent_req_post(req, ev); + return req; +} + +static void tstream_bsd_readv_handler(void *private_data) +{ + struct tevent_req *req = talloc_get_type_abort(private_data, + struct tevent_req); + struct tstream_bsd_readv_state *state = tevent_req_data(req, + struct tstream_bsd_readv_state); + struct tstream_context *stream = state->stream; + struct tstream_bsd *bsds = tstream_context_data(stream, struct tstream_bsd); + int ret; + int err; + bool retry; + + ret = readv(bsds->fd, state->vector, state->count); + if (ret == 0) { + /* propagate end of file */ + tevent_req_error(req, EPIPE); + return; + } + err = tsocket_bsd_error_from_errno(ret, errno, &retry); + if (retry) { + /* retry later */ + return; + } + if (tevent_req_error(req, err)) { + return; + } + + state->ret += ret; + + while (ret > 0) { + if (ret < state->vector[0].iov_len) { + uint8_t *base; + base = (uint8_t *)state->vector[0].iov_base; + base += ret; + state->vector[0].iov_base = base; + state->vector[0].iov_len -= ret; + break; + } + ret -= state->vector[0].iov_len; + state->vector += 1; + state->count -= 1; + } + + /* + * there're maybe some empty vectors at the end + * which we need to skip, otherwise we would get + * ret == 0 from the readv() call and return EPIPE + */ + while (state->count > 0) { + if (state->vector[0].iov_len > 0) { + break; + } + state->vector += 1; + state->count -= 1; + } + + if (state->count > 0) { + /* we have more to read */ + return; + } + + tevent_req_done(req); +} + +static int tstream_bsd_readv_recv(struct tevent_req *req, + int *perrno) +{ + struct tstream_bsd_readv_state *state = tevent_req_data(req, + struct tstream_bsd_readv_state); + int ret; + + ret = tsocket_simple_int_recv(req, perrno); + if (ret == 0) { + ret = state->ret; + } + + tevent_req_received(req); + return ret; +} + +struct tstream_bsd_writev_state { + struct tstream_context *stream; + + struct iovec *vector; + size_t count; + + int ret; +}; + +static int tstream_bsd_writev_destructor(struct tstream_bsd_writev_state *state) +{ + struct tstream_bsd *bsds = tstream_context_data(state->stream, + struct tstream_bsd); + + tstream_bsd_set_writeable_handler(bsds, NULL, NULL, NULL); + + return 0; +} + +static void tstream_bsd_writev_handler(void *private_data); + +static struct tevent_req *tstream_bsd_writev_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream, + const struct iovec *vector, + size_t count) +{ + struct tevent_req *req; + struct tstream_bsd_writev_state *state; + struct tstream_bsd *bsds = tstream_context_data(stream, struct tstream_bsd); + int ret; + + req = tevent_req_create(mem_ctx, &state, + struct tstream_bsd_writev_state); + if (!req) { + return NULL; + } + + state->stream = stream; + /* we make a copy of the vector so that we can modify it */ + state->vector = talloc_array(state, struct iovec, count); + if (tevent_req_nomem(state->vector, req)) { + goto post; + } + memcpy(state->vector, vector, sizeof(struct iovec)*count); + state->count = count; + state->ret = 0; + + talloc_set_destructor(state, tstream_bsd_writev_destructor); + + if (bsds->fd == -1) { + tevent_req_error(req, ENOTCONN); + goto post; + } + + /* + * this is a fast path, not waiting for the + * socket to become explicit writeable gains + 
* about 10%-20% performance in benchmark tests. + */ + tstream_bsd_writev_handler(req); + if (!tevent_req_is_in_progress(req)) { + goto post; + } + + ret = tstream_bsd_set_writeable_handler(bsds, ev, + tstream_bsd_writev_handler, + req); + if (ret == -1) { + tevent_req_error(req, errno); + goto post; + } + + return req; + + post: + tevent_req_post(req, ev); + return req; +} + +static void tstream_bsd_writev_handler(void *private_data) +{ + struct tevent_req *req = talloc_get_type_abort(private_data, + struct tevent_req); + struct tstream_bsd_writev_state *state = tevent_req_data(req, + struct tstream_bsd_writev_state); + struct tstream_context *stream = state->stream; + struct tstream_bsd *bsds = tstream_context_data(stream, struct tstream_bsd); + ssize_t ret; + int err; + bool retry; + + ret = writev(bsds->fd, state->vector, state->count); + if (ret == 0) { + /* propagate end of file */ + tevent_req_error(req, EPIPE); + return; + } + err = tsocket_bsd_error_from_errno(ret, errno, &retry); + if (retry) { + /* retry later */ + return; + } + if (tevent_req_error(req, err)) { + return; + } + + state->ret += ret; + + while (ret > 0) { + if (ret < state->vector[0].iov_len) { + uint8_t *base; + base = (uint8_t *)state->vector[0].iov_base; + base += ret; + state->vector[0].iov_base = base; + state->vector[0].iov_len -= ret; + break; + } + ret -= state->vector[0].iov_len; + state->vector += 1; + state->count -= 1; + } + + /* + * there're maybe some empty vectors at the end + * which we need to skip, otherwise we would get + * ret == 0 from the writev() call and return EPIPE + */ + while (state->count > 0) { + if (state->vector[0].iov_len > 0) { + break; + } + state->vector += 1; + state->count -= 1; + } + + if (state->count > 0) { + /* we have more to read */ + return; + } + + tevent_req_done(req); +} + +static int tstream_bsd_writev_recv(struct tevent_req *req, int *perrno) +{ + struct tstream_bsd_writev_state *state = tevent_req_data(req, + struct tstream_bsd_writev_state); + int ret; + + ret = tsocket_simple_int_recv(req, perrno); + if (ret == 0) { + ret = state->ret; + } + + tevent_req_received(req); + return ret; +} + +struct tstream_bsd_disconnect_state { + void *__dummy; +}; + +static struct tevent_req *tstream_bsd_disconnect_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream) +{ + struct tstream_bsd *bsds = tstream_context_data(stream, struct tstream_bsd); + struct tevent_req *req; + struct tstream_bsd_disconnect_state *state; + int ret; + int err; + bool dummy; + + req = tevent_req_create(mem_ctx, &state, + struct tstream_bsd_disconnect_state); + if (req == NULL) { + return NULL; + } + + if (bsds->fd == -1) { + tevent_req_error(req, ENOTCONN); + goto post; + } + + ret = close(bsds->fd); + bsds->fd = -1; + err = tsocket_bsd_error_from_errno(ret, errno, &dummy); + if (tevent_req_error(req, err)) { + goto post; + } + + tevent_req_done(req); +post: + tevent_req_post(req, ev); + return req; +} + +static int tstream_bsd_disconnect_recv(struct tevent_req *req, + int *perrno) +{ + int ret; + + ret = tsocket_simple_int_recv(req, perrno); + + tevent_req_received(req); + return ret; +} + +static const struct tstream_context_ops tstream_bsd_ops = { + .name = "bsd", + + .pending_bytes = tstream_bsd_pending_bytes, + + .readv_send = tstream_bsd_readv_send, + .readv_recv = tstream_bsd_readv_recv, + + .writev_send = tstream_bsd_writev_send, + .writev_recv = tstream_bsd_writev_recv, + + .disconnect_send = tstream_bsd_disconnect_send, + .disconnect_recv = 
tstream_bsd_disconnect_recv, +}; + +static int tstream_bsd_destructor(struct tstream_bsd *bsds) +{ + TALLOC_FREE(bsds->fde); + if (bsds->fd != -1) { + close(bsds->fd); + bsds->fd = -1; + } + return 0; +} + +int _tstream_bsd_existing_socket(TALLOC_CTX *mem_ctx, + int fd, + struct tstream_context **_stream, + const char *location) +{ + struct tstream_context *stream; + struct tstream_bsd *bsds; + + stream = tstream_context_create(mem_ctx, + &tstream_bsd_ops, + &bsds, + struct tstream_bsd, + location); + if (!stream) { + return -1; + } + ZERO_STRUCTP(bsds); + bsds->fd = fd; + talloc_set_destructor(bsds, tstream_bsd_destructor); + + *_stream = stream; + return 0; +} + +struct tstream_bsd_connect_state { + int fd; + struct tevent_fd *fde; + struct tstream_conext *stream; +}; + +static int tstream_bsd_connect_destructor(struct tstream_bsd_connect_state *state) +{ + TALLOC_FREE(state->fde); + if (state->fd != -1) { + close(state->fd); + state->fd = -1; + } + + return 0; +} + +static void tstream_bsd_connect_fde_handler(struct tevent_context *ev, + struct tevent_fd *fde, + uint16_t flags, + void *private_data); + +static struct tevent_req * tstream_bsd_connect_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + int sys_errno, + const struct tsocket_address *local, + const struct tsocket_address *remote) +{ + struct tevent_req *req; + struct tstream_bsd_connect_state *state; + struct tsocket_address_bsd *lbsda = + talloc_get_type_abort(local->private_data, + struct tsocket_address_bsd); + struct tsocket_address_bsd *rbsda = + talloc_get_type_abort(remote->private_data, + struct tsocket_address_bsd); + int ret; + int err; + bool retry; + bool do_bind = false; + bool do_reuseaddr = false; + bool do_ipv6only = false; + bool is_inet = false; + int sa_fam = lbsda->u.sa.sa_family; + + req = tevent_req_create(mem_ctx, &state, + struct tstream_bsd_connect_state); + if (!req) { + return NULL; + } + state->fd = -1; + state->fde = NULL; + + talloc_set_destructor(state, tstream_bsd_connect_destructor); + + /* give the wrappers a chance to report an error */ + if (sys_errno != 0) { + tevent_req_error(req, sys_errno); + goto post; + } + + switch (lbsda->u.sa.sa_family) { + case AF_UNIX: + if (lbsda->u.un.sun_path[0] != 0) { + do_reuseaddr = true; + do_bind = true; + } + break; + case AF_INET: + if (lbsda->u.in.sin_port != 0) { + do_reuseaddr = true; + do_bind = true; + } + if (lbsda->u.in.sin_addr.s_addr != INADDR_ANY) { + do_bind = true; + } + is_inet = true; + break; +#ifdef HAVE_IPV6 + case AF_INET6: + if (lbsda->u.in6.sin6_port != 0) { + do_reuseaddr = true; + do_bind = true; + } + if (memcmp(&in6addr_any, + &lbsda->u.in6.sin6_addr, + sizeof(in6addr_any)) != 0) { + do_bind = true; + } + is_inet = true; + do_ipv6only = true; + break; +#endif + default: + tevent_req_error(req, EINVAL); + goto post; + } + + if (!do_bind && is_inet) { + sa_fam = rbsda->u.sa.sa_family; + switch (sa_fam) { + case AF_INET: + do_ipv6only = false; + break; +#ifdef HAVE_IPV6 + case AF_INET6: + do_ipv6only = true; + break; +#endif + } + } + + state->fd = socket(sa_fam, SOCK_STREAM, 0); + if (state->fd == -1) { + tevent_req_error(req, errno); + goto post; + } + + state->fd = tsocket_bsd_common_prepare_fd(state->fd, true); + if (state->fd == -1) { + tevent_req_error(req, errno); + goto post; + } + +#ifdef HAVE_IPV6 + if (do_ipv6only) { + int val = 1; + + ret = setsockopt(state->fd, IPPROTO_IPV6, IPV6_V6ONLY, + (const void *)&val, sizeof(val)); + if (ret == -1) { + tevent_req_error(req, errno); + goto post; + } + } +#endif + + if 
(do_reuseaddr) { + int val = 1; + + ret = setsockopt(state->fd, SOL_SOCKET, SO_REUSEADDR, + (const void *)&val, sizeof(val)); + if (ret == -1) { + tevent_req_error(req, errno); + goto post; + } + } + + if (do_bind) { + ret = bind(state->fd, &lbsda->u.sa, lbsda->sa_socklen); + if (ret == -1) { + tevent_req_error(req, errno); + goto post; + } + } + + if (rbsda->u.sa.sa_family != sa_fam) { + tevent_req_error(req, EINVAL); + goto post; + } + + ret = connect(state->fd, &rbsda->u.sa, rbsda->sa_socklen); + err = tsocket_bsd_error_from_errno(ret, errno, &retry); + if (retry) { + /* retry later */ + goto async; + } + if (tevent_req_error(req, err)) { + goto post; + } + + tevent_req_done(req); + goto post; + + async: + state->fde = tevent_add_fd(ev, state, + state->fd, + TEVENT_FD_READ | TEVENT_FD_WRITE, + tstream_bsd_connect_fde_handler, + req); + if (tevent_req_nomem(state->fde, req)) { + goto post; + } + + return req; + + post: + tevent_req_post(req, ev); + return req; +} + +static void tstream_bsd_connect_fde_handler(struct tevent_context *ev, + struct tevent_fd *fde, + uint16_t flags, + void *private_data) +{ + struct tevent_req *req = talloc_get_type_abort(private_data, + struct tevent_req); + struct tstream_bsd_connect_state *state = tevent_req_data(req, + struct tstream_bsd_connect_state); + int ret; + int error=0; + socklen_t len = sizeof(error); + int err; + bool retry; + + ret = getsockopt(state->fd, SOL_SOCKET, SO_ERROR, &error, &len); + if (ret == 0) { + if (error != 0) { + errno = error; + ret = -1; + } + } + err = tsocket_bsd_error_from_errno(ret, errno, &retry); + if (retry) { + /* retry later */ + return; + } + if (tevent_req_error(req, err)) { + return; + } + + tevent_req_done(req); +} + +static int tstream_bsd_connect_recv(struct tevent_req *req, + int *perrno, + TALLOC_CTX *mem_ctx, + struct tstream_context **stream, + const char *location) +{ + struct tstream_bsd_connect_state *state = tevent_req_data(req, + struct tstream_bsd_connect_state); + int ret; + + ret = tsocket_simple_int_recv(req, perrno); + if (ret == 0) { + ret = _tstream_bsd_existing_socket(mem_ctx, + state->fd, + stream, + location); + if (ret == -1) { + *perrno = errno; + goto done; + } + TALLOC_FREE(state->fde); + state->fd = -1; + } + +done: + tevent_req_received(req); + return ret; +} + +struct tevent_req * tstream_inet_tcp_connect_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + const struct tsocket_address *local, + const struct tsocket_address *remote) +{ + struct tsocket_address_bsd *lbsda = + talloc_get_type_abort(local->private_data, + struct tsocket_address_bsd); + struct tevent_req *req; + int sys_errno = 0; + + switch (lbsda->u.sa.sa_family) { + case AF_INET: + break; +#ifdef HAVE_IPV6 + case AF_INET6: + break; +#endif + default: + sys_errno = EINVAL; + break; + } + + req = tstream_bsd_connect_send(mem_ctx, ev, sys_errno, local, remote); + + return req; +} + +int _tstream_inet_tcp_connect_recv(struct tevent_req *req, + int *perrno, + TALLOC_CTX *mem_ctx, + struct tstream_context **stream, + const char *location) +{ + return tstream_bsd_connect_recv(req, perrno, mem_ctx, stream, location); +} + +struct tevent_req * tstream_unix_connect_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + const struct tsocket_address *local, + const struct tsocket_address *remote) +{ + struct tsocket_address_bsd *lbsda = + talloc_get_type_abort(local->private_data, + struct tsocket_address_bsd); + struct tevent_req *req; + int sys_errno = 0; + + switch (lbsda->u.sa.sa_family) { + case AF_UNIX: + break; + 
default: + sys_errno = EINVAL; + break; + } + + req = tstream_bsd_connect_send(mem_ctx, ev, sys_errno, local, remote); + + return req; +} + +int _tstream_unix_connect_recv(struct tevent_req *req, + int *perrno, + TALLOC_CTX *mem_ctx, + struct tstream_context **stream, + const char *location) +{ + return tstream_bsd_connect_recv(req, perrno, mem_ctx, stream, location); +} + +int _tstream_unix_socketpair(TALLOC_CTX *mem_ctx1, + struct tstream_context **_stream1, + TALLOC_CTX *mem_ctx2, + struct tstream_context **_stream2, + const char *location) +{ + int ret; + int fds[2]; + int fd1; + int fd2; + struct tstream_context *stream1 = NULL; + struct tstream_context *stream2 = NULL; + + ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fds); + if (ret == -1) { + return -1; + } + fd1 = fds[0]; + fd2 = fds[1]; + + fd1 = tsocket_bsd_common_prepare_fd(fd1, true); + if (fd1 == -1) { + int sys_errno = errno; + close(fd2); + errno = sys_errno; + return -1; + } + + fd2 = tsocket_bsd_common_prepare_fd(fd2, true); + if (fd2 == -1) { + int sys_errno = errno; + close(fd1); + errno = sys_errno; + return -1; + } + + ret = _tstream_bsd_existing_socket(mem_ctx1, + fd1, + &stream1, + location); + if (ret == -1) { + int sys_errno = errno; + close(fd1); + close(fd2); + errno = sys_errno; + return -1; + } + + ret = _tstream_bsd_existing_socket(mem_ctx2, + fd2, + &stream2, + location); + if (ret == -1) { + int sys_errno = errno; + talloc_free(stream1); + close(fd2); + errno = sys_errno; + return -1; + } + + *_stream1 = stream1; + *_stream2 = stream2; + return 0; +} + diff --git a/libatalk/tsocket/tsocket_helpers.c b/libatalk/tsocket/tsocket_helpers.c new file mode 100644 index 00000000..3a41a3ef --- /dev/null +++ b/libatalk/tsocket/tsocket_helpers.c @@ -0,0 +1,518 @@ +/* + Unix SMB/CIFS implementation. + + Copyright (C) Stefan Metzmacher 2009 + + ** NOTE! The following LGPL license applies to the tsocket + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see . 
+*/ + +#include "replace.h" +#include "system/filesys.h" +#include "tsocket.h" +#include "tsocket_internal.h" + +struct tdgram_sendto_queue_state { + /* this structs are owned by the caller */ + struct { + struct tevent_context *ev; + struct tdgram_context *dgram; + const uint8_t *buf; + size_t len; + const struct tsocket_address *dst; + } caller; + ssize_t ret; +}; + +static void tdgram_sendto_queue_trigger(struct tevent_req *req, + void *private_data); +static void tdgram_sendto_queue_done(struct tevent_req *subreq); + +struct tevent_req *tdgram_sendto_queue_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tdgram_context *dgram, + struct tevent_queue *queue, + const uint8_t *buf, + size_t len, + struct tsocket_address *dst) +{ + struct tevent_req *req; + struct tdgram_sendto_queue_state *state; + bool ok; + + req = tevent_req_create(mem_ctx, &state, + struct tdgram_sendto_queue_state); + if (!req) { + return NULL; + } + + state->caller.ev = ev; + state->caller.dgram = dgram; + state->caller.buf = buf; + state->caller.len = len; + state->caller.dst = dst; + state->ret = -1; + + ok = tevent_queue_add(queue, + ev, + req, + tdgram_sendto_queue_trigger, + NULL); + if (!ok) { + tevent_req_nomem(NULL, req); + goto post; + } + + return req; + + post: + tevent_req_post(req, ev); + return req; +} + +static void tdgram_sendto_queue_trigger(struct tevent_req *req, + void *private_data) +{ + struct tdgram_sendto_queue_state *state = tevent_req_data(req, + struct tdgram_sendto_queue_state); + struct tevent_req *subreq; + + subreq = tdgram_sendto_send(state, + state->caller.ev, + state->caller.dgram, + state->caller.buf, + state->caller.len, + state->caller.dst); + if (tevent_req_nomem(subreq, req)) { + return; + } + tevent_req_set_callback(subreq, tdgram_sendto_queue_done, req); +} + +static void tdgram_sendto_queue_done(struct tevent_req *subreq) +{ + struct tevent_req *req = tevent_req_callback_data(subreq, + struct tevent_req); + struct tdgram_sendto_queue_state *state = tevent_req_data(req, + struct tdgram_sendto_queue_state); + ssize_t ret; + int sys_errno; + + ret = tdgram_sendto_recv(subreq, &sys_errno); + talloc_free(subreq); + if (ret == -1) { + tevent_req_error(req, sys_errno); + return; + } + state->ret = ret; + + tevent_req_done(req); +} + +ssize_t tdgram_sendto_queue_recv(struct tevent_req *req, int *perrno) +{ + struct tdgram_sendto_queue_state *state = tevent_req_data(req, + struct tdgram_sendto_queue_state); + ssize_t ret; + + ret = tsocket_simple_int_recv(req, perrno); + if (ret == 0) { + ret = state->ret; + } + + tevent_req_received(req); + return ret; +} + +struct tstream_readv_pdu_state { + /* this structs are owned by the caller */ + struct { + struct tevent_context *ev; + struct tstream_context *stream; + tstream_readv_pdu_next_vector_t next_vector_fn; + void *next_vector_private; + } caller; + + /* + * Each call to the callback resets iov and count + * the callback allocated the iov as child of our state, + * that means we are allowed to modify and free it. + * + * we should call the callback every time we filled the given + * vector and ask for a new vector. We return if the callback + * ask for 0 bytes. 
+ */ + struct iovec *vector; + size_t count; + + /* + * the total number of bytes we read, + * the return value of the _recv function + */ + int total_read; +}; + +static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req); +static void tstream_readv_pdu_readv_done(struct tevent_req *subreq); + +struct tevent_req *tstream_readv_pdu_send(TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct tstream_context *stream, + tstream_readv_pdu_next_vector_t next_vector_fn, + void *next_vector_private) +{ + struct tevent_req *req; + struct tstream_readv_pdu_state *state; + + req = tevent_req_create(mem_ctx, &state, + struct tstream_readv_pdu_state); + if (!req) { + return NULL; + } + + state->caller.ev = ev; + state->caller.stream = stream; + state->caller.next_vector_fn = next_vector_fn; + state->caller.next_vector_private = next_vector_private; + + state->vector = NULL; + state->count = 0; + state->total_read = 0; + + tstream_readv_pdu_ask_for_next_vector(req); + if (!tevent_req_is_in_progress(req)) { + goto post; + } + + return req; + + post: + return tevent_req_post(req, ev); +} + +static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req) +{ + struct tstream_readv_pdu_state *state = tevent_req_data(req, + struct tstream_readv_pdu_state); + int ret; + size_t to_read = 0; + size_t i; + struct tevent_req *subreq; + + TALLOC_FREE(state->vector); + state->count = 0; + + ret = state->caller.next_vector_fn(state->caller.stream, + state->caller.next_vector_private, + state, &state->vector, &state->count); + if (ret == -1) { + tevent_req_error(req, errno); + return; + } + + if (state->count == 0) { + tevent_req_done(req); + return; + } + + for (i=0; i < state->count; i++) { + size_t tmp = to_read; + tmp += state->vector[i].iov_len; + + if (tmp < to_read) { + tevent_req_error(req, EMSGSIZE); + return; + } + + to_read = tmp; + } + + /* + * this is invalid the next vector function should have + * reported count == 0. 
+ */
+    if (to_read == 0) {
+        tevent_req_error(req, EINVAL);
+        return;
+    }
+
+    if (state->total_read + to_read < state->total_read) {
+        tevent_req_error(req, EMSGSIZE);
+        return;
+    }
+
+    subreq = tstream_readv_send(state,
+                                state->caller.ev,
+                                state->caller.stream,
+                                state->vector,
+                                state->count);
+    if (tevent_req_nomem(subreq, req)) {
+        return;
+    }
+    tevent_req_set_callback(subreq, tstream_readv_pdu_readv_done, req);
+}
+
+static void tstream_readv_pdu_readv_done(struct tevent_req *subreq)
+{
+    struct tevent_req *req = tevent_req_callback_data(subreq,
+                             struct tevent_req);
+    struct tstream_readv_pdu_state *state = tevent_req_data(req,
+                                            struct tstream_readv_pdu_state);
+    int ret;
+    int sys_errno;
+
+    ret = tstream_readv_recv(subreq, &sys_errno);
+    if (ret == -1) {
+        tevent_req_error(req, sys_errno);
+        return;
+    }
+
+    state->total_read += ret;
+
+    /* ask the callback for a new vector we should fill */
+    tstream_readv_pdu_ask_for_next_vector(req);
+}
+
+int tstream_readv_pdu_recv(struct tevent_req *req, int *perrno)
+{
+    struct tstream_readv_pdu_state *state = tevent_req_data(req,
+                                            struct tstream_readv_pdu_state);
+    int ret;
+
+    ret = tsocket_simple_int_recv(req, perrno);
+    if (ret == 0) {
+        ret = state->total_read;
+    }
+
+    tevent_req_received(req);
+    return ret;
+}
+
+struct tstream_readv_pdu_queue_state {
+    /* these structs are owned by the caller */
+    struct {
+        struct tevent_context *ev;
+        struct tstream_context *stream;
+        tstream_readv_pdu_next_vector_t next_vector_fn;
+        void *next_vector_private;
+    } caller;
+    int ret;
+};
+
+static void tstream_readv_pdu_queue_trigger(struct tevent_req *req,
+                                            void *private_data);
+static void tstream_readv_pdu_queue_done(struct tevent_req *subreq);
+
+struct tevent_req *tstream_readv_pdu_queue_send(TALLOC_CTX *mem_ctx,
+                                struct tevent_context *ev,
+                                struct tstream_context *stream,
+                                struct tevent_queue *queue,
+                                tstream_readv_pdu_next_vector_t next_vector_fn,
+                                void *next_vector_private)
+{
+    struct tevent_req *req;
+    struct tstream_readv_pdu_queue_state *state;
+    bool ok;
+
+    req = tevent_req_create(mem_ctx, &state,
+                            struct tstream_readv_pdu_queue_state);
+    if (!req) {
+        return NULL;
+    }
+
+    state->caller.ev = ev;
+    state->caller.stream = stream;
+    state->caller.next_vector_fn = next_vector_fn;
+    state->caller.next_vector_private = next_vector_private;
+    state->ret = -1;
+
+    ok = tevent_queue_add(queue,
+                          ev,
+                          req,
+                          tstream_readv_pdu_queue_trigger,
+                          NULL);
+    if (!ok) {
+        tevent_req_nomem(NULL, req);
+        goto post;
+    }
+
+    return req;
+
+ post:
+    return tevent_req_post(req, ev);
+}
+
+static void tstream_readv_pdu_queue_trigger(struct tevent_req *req,
+                                            void *private_data)
+{
+    struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
+                                        struct tstream_readv_pdu_queue_state);
+    struct tevent_req *subreq;
+
+    subreq = tstream_readv_pdu_send(state,
+                                    state->caller.ev,
+                                    state->caller.stream,
+                                    state->caller.next_vector_fn,
+                                    state->caller.next_vector_private);
+    if (tevent_req_nomem(subreq, req)) {
+        return;
+    }
+    tevent_req_set_callback(subreq, tstream_readv_pdu_queue_done, req);
+}
+
+static void tstream_readv_pdu_queue_done(struct tevent_req *subreq)
+{
+    struct tevent_req *req = tevent_req_callback_data(subreq,
+                             struct tevent_req);
+    struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
+                                        struct tstream_readv_pdu_queue_state);
+    int ret;
+    int sys_errno;
+
+    ret = tstream_readv_pdu_recv(subreq, &sys_errno);
+    talloc_free(subreq);
+    if (ret == -1) {
+        tevent_req_error(req, sys_errno);
+        return;
+    }
+    state->ret = ret;
+
+    tevent_req_done(req);
+}
+
+int tstream_readv_pdu_queue_recv(struct tevent_req *req, int *perrno)
+{
+    struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
+                                        struct tstream_readv_pdu_queue_state);
+    int ret;
+
+    ret = tsocket_simple_int_recv(req, perrno);
+    if (ret == 0) {
+        ret = state->ret;
+    }
+
+    tevent_req_received(req);
+    return ret;
+}
+
+struct tstream_writev_queue_state {
+    /* these structs are owned by the caller */
+    struct {
+        struct tevent_context *ev;
+        struct tstream_context *stream;
+        const struct iovec *vector;
+        size_t count;
+    } caller;
+    int ret;
+};
+
+static void tstream_writev_queue_trigger(struct tevent_req *req,
+                                         void *private_data);
+static void tstream_writev_queue_done(struct tevent_req *subreq);
+
+struct tevent_req *tstream_writev_queue_send(TALLOC_CTX *mem_ctx,
+                                             struct tevent_context *ev,
+                                             struct tstream_context *stream,
+                                             struct tevent_queue *queue,
+                                             const struct iovec *vector,
+                                             size_t count)
+{
+    struct tevent_req *req;
+    struct tstream_writev_queue_state *state;
+    bool ok;
+
+    req = tevent_req_create(mem_ctx, &state,
+                            struct tstream_writev_queue_state);
+    if (!req) {
+        return NULL;
+    }
+
+    state->caller.ev = ev;
+    state->caller.stream = stream;
+    state->caller.vector = vector;
+    state->caller.count = count;
+    state->ret = -1;
+
+    ok = tevent_queue_add(queue,
+                          ev,
+                          req,
+                          tstream_writev_queue_trigger,
+                          NULL);
+    if (!ok) {
+        tevent_req_nomem(NULL, req);
+        goto post;
+    }
+
+    return req;
+
+ post:
+    return tevent_req_post(req, ev);
+}
+
+static void tstream_writev_queue_trigger(struct tevent_req *req,
+                                         void *private_data)
+{
+    struct tstream_writev_queue_state *state = tevent_req_data(req,
+                                        struct tstream_writev_queue_state);
+    struct tevent_req *subreq;
+
+    subreq = tstream_writev_send(state,
+                                 state->caller.ev,
+                                 state->caller.stream,
+                                 state->caller.vector,
+                                 state->caller.count);
+    if (tevent_req_nomem(subreq, req)) {
+        return;
+    }
+    tevent_req_set_callback(subreq, tstream_writev_queue_done, req);
+}
+
+static void tstream_writev_queue_done(struct tevent_req *subreq)
+{
+    struct tevent_req *req = tevent_req_callback_data(subreq,
+                             struct tevent_req);
+    struct tstream_writev_queue_state *state = tevent_req_data(req,
+                                        struct tstream_writev_queue_state);
+    int ret;
+    int sys_errno;
+
+    ret = tstream_writev_recv(subreq, &sys_errno);
+    talloc_free(subreq);
+    if (ret == -1) {
+        tevent_req_error(req, sys_errno);
+        return;
+    }
+    state->ret = ret;
+
+    tevent_req_done(req);
+}
+
+int tstream_writev_queue_recv(struct tevent_req *req, int *perrno)
+{
+    struct tstream_writev_queue_state *state = tevent_req_data(req,
+                                        struct tstream_writev_queue_state);
+    int ret;
+
+    ret = tsocket_simple_int_recv(req, perrno);
+    if (ret == 0) {
+        ret = state->ret;
+    }
+
+    tevent_req_received(req);
+    return ret;
+}
+
diff --git a/libatalk/tsocket/tsocket_internal.h b/libatalk/tsocket/tsocket_internal.h
new file mode 100644
index 00000000..154b2ce6
--- /dev/null
+++ b/libatalk/tsocket/tsocket_internal.h
@@ -0,0 +1,144 @@
+/*
+   Unix SMB/CIFS implementation.
+
+   Copyright (C) Stefan Metzmacher 2009
+
+     ** NOTE! The following LGPL license applies to the tsocket
+     ** library. This does NOT imply that all of Samba is released
+     ** under the LGPL
+
+   This library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 3 of the License, or (at your option) any later version.
+
+   This library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _TSOCKET_INTERNAL_H
+#define _TSOCKET_INTERNAL_H
+
+#include
+#include
+
+struct tsocket_address_ops {
+    const char *name;
+
+    char *(*string)(const struct tsocket_address *addr,
+                    TALLOC_CTX *mem_ctx);
+
+    struct tsocket_address *(*copy)(const struct tsocket_address *addr,
+                                    TALLOC_CTX *mem_ctx,
+                                    const char *location);
+};
+
+struct tsocket_address {
+    const char *location;
+    const struct tsocket_address_ops *ops;
+
+    void *private_data;
+};
+
+struct tsocket_address *_tsocket_address_create(TALLOC_CTX *mem_ctx,
+                                const struct tsocket_address_ops *ops,
+                                void *pstate,
+                                size_t psize,
+                                const char *type,
+                                const char *location);
+#define tsocket_address_create(mem_ctx, ops, state, type, location) \
+    _tsocket_address_create(mem_ctx, ops, state, sizeof(type), \
+                            #type, location)
+
+struct tdgram_context_ops {
+    const char *name;
+
+    struct tevent_req *(*recvfrom_send)(TALLOC_CTX *mem_ctx,
+                                        struct tevent_context *ev,
+                                        struct tdgram_context *dgram);
+    ssize_t (*recvfrom_recv)(struct tevent_req *req,
+                             int *perrno,
+                             TALLOC_CTX *mem_ctx,
+                             uint8_t **buf,
+                             struct tsocket_address **src);
+
+    struct tevent_req *(*sendto_send)(TALLOC_CTX *mem_ctx,
+                                      struct tevent_context *ev,
+                                      struct tdgram_context *dgram,
+                                      const uint8_t *buf, size_t len,
+                                      const struct tsocket_address *dst);
+    ssize_t (*sendto_recv)(struct tevent_req *req,
+                           int *perrno);
+
+    struct tevent_req *(*disconnect_send)(TALLOC_CTX *mem_ctx,
+                                          struct tevent_context *ev,
+                                          struct tdgram_context *dgram);
+    int (*disconnect_recv)(struct tevent_req *req,
+                           int *perrno);
+};
+
+struct tdgram_context *_tdgram_context_create(TALLOC_CTX *mem_ctx,
+                                const struct tdgram_context_ops *ops,
+                                void *pstate,
+                                size_t psize,
+                                const char *type,
+                                const char *location);
+#define tdgram_context_create(mem_ctx, ops, state, type, location) \
+    _tdgram_context_create(mem_ctx, ops, state, sizeof(type), \
+                           #type, location)
+
+void *_tdgram_context_data(struct tdgram_context *dgram);
+#define tdgram_context_data(_req, _type) \
+    talloc_get_type_abort(_tdgram_context_data(_req), _type)
+
+struct tstream_context_ops {
+    const char *name;
+
+    ssize_t (*pending_bytes)(struct tstream_context *stream);
+
+    struct tevent_req *(*readv_send)(TALLOC_CTX *mem_ctx,
+                                     struct tevent_context *ev,
+                                     struct tstream_context *stream,
+                                     struct iovec *vector,
+                                     size_t count);
+    int (*readv_recv)(struct tevent_req *req,
+                      int *perrno);
+
+    struct tevent_req *(*writev_send)(TALLOC_CTX *mem_ctx,
+                                      struct tevent_context *ev,
+                                      struct tstream_context *stream,
+                                      const struct iovec *vector,
+                                      size_t count);
+    int (*writev_recv)(struct tevent_req *req,
+                       int *perrno);
+
+    struct tevent_req *(*disconnect_send)(TALLOC_CTX *mem_ctx,
+                                          struct tevent_context *ev,
+                                          struct tstream_context *stream);
+    int (*disconnect_recv)(struct tevent_req *req,
+                           int *perrno);
+};
+
+struct tstream_context *_tstream_context_create(TALLOC_CTX *mem_ctx,
+                                const struct tstream_context_ops *ops,
+                                void *pstate,
+                                size_t psize,
+                                const char *type,
+                                const char *location);
+#define tstream_context_create(mem_ctx, ops, state, type, location) \
+    _tstream_context_create(mem_ctx, ops, state, sizeof(type), \
+                            #type, location)
+
+void *_tstream_context_data(struct tstream_context *stream);
+#define tstream_context_data(_req, _type) \
+    talloc_get_type_abort(_tstream_context_data(_req), _type)
+
+int tsocket_simple_int_recv(struct tevent_req *req, int *perrno);
+
+#endif /* _TSOCKET_INTERNAL_H */
+
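
The readv_pdu helpers above drive a stream read through a caller-supplied next-vector callback: tstream_readv_pdu_send() keeps invoking the callback, reads into whatever iovecs it returns, and treats a returned count of 0 as "the PDU is complete". The following sketch shows what such a callback might look like for a hypothetical protocol that prefixes each PDU with a 4-byte big-endian length; my_pdu_state and my_pdu_next_vector are illustrative names only, and the callback signature is assumed to be the tstream_readv_pdu_next_vector_t typedef from the imported tsocket.h.

#include <errno.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/uio.h>
#include <talloc.h>
#include <tevent.h>
#include "tsocket.h"    /* adjust to the libatalk include path */

/* hypothetical per-PDU state: 4-byte big-endian length prefix, then a body */
struct my_pdu_state {
    uint8_t hdr[4];
    uint8_t *body;
    uint32_t body_len;
    bool got_header;
};

static int my_pdu_next_vector(struct tstream_context *stream,
                              void *private_data,
                              TALLOC_CTX *mem_ctx,
                              struct iovec **_vector,
                              size_t *_count)
{
    struct my_pdu_state *pdu = (struct my_pdu_state *)private_data;
    struct iovec *vector;

    if (!pdu->got_header) {
        /* first call: ask tstream_readv_pdu to read the 4-byte header */
        vector = talloc_array(mem_ctx, struct iovec, 1);
        if (vector == NULL) {
            errno = ENOMEM;
            return -1;
        }
        pdu->got_header = true;
        vector[0].iov_base = pdu->hdr;
        vector[0].iov_len = sizeof(pdu->hdr);
        *_vector = vector;
        *_count = 1;
        return 0;
    }

    if (pdu->body == NULL) {
        /* second call: the header is complete, ask for the body */
        pdu->body_len = ((uint32_t)pdu->hdr[0] << 24) |
                        ((uint32_t)pdu->hdr[1] << 16) |
                        ((uint32_t)pdu->hdr[2] << 8) |
                         (uint32_t)pdu->hdr[3];
        if (pdu->body_len == 0) {
            /* empty PDU: nothing more to read */
            *_vector = NULL;
            *_count = 0;
            return 0;
        }
        pdu->body = talloc_array(pdu, uint8_t, pdu->body_len);
        if (pdu->body == NULL) {
            errno = ENOMEM;
            return -1;
        }
        vector = talloc_array(mem_ctx, struct iovec, 1);
        if (vector == NULL) {
            errno = ENOMEM;
            return -1;
        }
        vector[0].iov_base = pdu->body;
        vector[0].iov_len = pdu->body_len;
        *_vector = vector;
        *_count = 1;
        return 0;
    }

    /* third call: the body has been read, count == 0 completes the PDU */
    *_vector = NULL;
    *_count = 0;
    return 0;
}

A caller would then start the read with subreq = tstream_readv_pdu_send(mem_ctx, ev, stream, my_pdu_next_vector, pdu); and collect the total byte count in its tevent callback via tstream_readv_pdu_recv(subreq, &sys_errno), or use the _queue_ variant above to serialize PDU reads on a shared stream.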
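
tstream_writev_queue_send() wraps tstream_writev_send() behind a tevent_queue so that several independent senders can share one stream without interleaving their writes. A minimal sketch of its use from a connection object follows; my_connection, send_reply and reply_done are made-up names and not part of this import.

#include <sys/uio.h>
#include <talloc.h>
#include <tevent.h>
#include "tsocket.h"    /* adjust to the libatalk include path */

struct my_connection {
    struct tevent_context *ev;
    struct tstream_context *stream;
    struct tevent_queue *write_queue;   /* e.g. from tevent_queue_create() */
};

static void reply_done(struct tevent_req *subreq);

static int send_reply(struct my_connection *conn,
                      const struct iovec *iov, size_t count)
{
    struct tevent_req *subreq;

    /*
     * The write is queued: it only starts once earlier writes on
     * write_queue have finished.  The iovec array and the buffers it
     * points to must stay valid until reply_done() runs, because the
     * queue state only stores the pointer.
     */
    subreq = tstream_writev_queue_send(conn, conn->ev, conn->stream,
                                       conn->write_queue, iov, count);
    if (subreq == NULL) {
        return -1;
    }
    tevent_req_set_callback(subreq, reply_done, conn);
    return 0;
}

static void reply_done(struct tevent_req *subreq)
{
    int sys_errno;
    int ret;

    ret = tstream_writev_queue_recv(subreq, &sys_errno);
    talloc_free(subreq);
    if (ret == -1) {
        /* the write failed; sys_errno holds the errno value */
        return;
    }
    /* ret is the number of bytes written */
}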
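
tsocket_internal.h only matters to transport backends such as tsocket_bsd.c: a backend provides an ops table and lets the _create() helpers allocate its private state next to the public context, retrieving it later with the *_context_data() macros. The sketch below illustrates that pattern with invented names (my_stream_state, my_stream_ops, my_stream_wrap_fd); it assumes the __location__ macro from the accompanying Samba replace.h is available, but any string literal would do for the location argument.

#include <talloc.h>
#include <tevent.h>
#include "tsocket.h"
#include "tsocket_internal.h"   /* backend-only header added by this commit */

/* invented backend state; a real backend would track fds, pending reqs, ... */
struct my_stream_state {
    int fd;
};

/* readv_send/readv_recv/writev_send/... would be filled in by a real backend */
static const struct tstream_context_ops my_stream_ops = {
    .name = "my_stream",
};

static struct tstream_context *my_stream_wrap_fd(TALLOC_CTX *mem_ctx, int fd)
{
    struct tstream_context *stream;
    struct my_stream_state *state;

    /*
     * tstream_context_create() expands to _tstream_context_create() with
     * sizeof(struct my_stream_state) and the stringified type name: it
     * allocates the private data, names the talloc chunk after the type,
     * and hands the typed pointer back through &state.
     */
    stream = tstream_context_create(mem_ctx, &my_stream_ops, &state,
                                    struct my_stream_state, __location__);
    if (stream == NULL) {
        return NULL;
    }
    state->fd = fd;
    return stream;
}

Later calls inside the backend recover the state with tstream_context_data(stream, struct my_stream_state), which aborts if the talloc type name does not match.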