From 29f0bfc54b504d6cc5c371afd5623164a8020f09 Mon Sep 17 00:00:00 2001
From: Haris Rotsos <[email protected]>
Date: Mon, 10 Jun 2019 15:55:10 +0000
Subject: [PATCH 2/2] fix page sharing and make compilation work with the
 latest ClickOS update
---
include/xensocket.h | 137 ++++---
stub.mk | 2 +-
xensocket.c | 1078 +++++++++++++++++++++++++++------------------------
3 files changed, 658 insertions(+), 559 deletions(-)
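For orientation, the public API this patch introduces in include/xensocket.h boils down to a bind/connect pair plus send/receive and an rx-handler hook. A minimal usage sketch follows (the domain IDs, sockid, and handler are hypothetical, error handling is elided, and this is not part of the patch):

    #include <mini-os/xensocket.h>

    /* hypothetical handler: count packets delivered by xensock_rx() */
    static void on_rx(unsigned char *data, int len, void *arg)
    {
        (*(int *)arg)++;
    }

    /* Server side, e.g. in domain 1, peer domain 2, socket id 0. */
    static void server_example(void)
    {
        struct xen_sock srv;
        int npkts = 0;

        initialize_xen_sock(&srv);
        xen_bind(&srv, 2, 0);            /* returns the descriptor gref */
        xensock_set_rx_handler(&srv, on_rx, &npkts);
        xensock_rx(&srv);                /* drain inbound packets */
    }

    /* Client side, in domain 2, connecting back to domain 1, socket id 0. */
    static void client_example(void)
    {
        struct xen_sock cli;

        initialize_xen_sock(&cli);
        xen_connect(&cli, 1, 0);
        xen_sendmsg(&cli, "hello", 5);
        xen_shutdown(&cli, 0);
    }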
diff --git a/include/xensocket.h b/include/xensocket.h
index 3e97fcf..2e2c2a8 100644
--- a/include/xensocket.h
+++ b/include/xensocket.h
@@ -17,53 +17,86 @@
#ifndef __XENSOCKET_H__
#define __XENSOCKET_H__
-struct descriptor_page;
-struct xen_sock;
-static void
-initialize_xen_sock (struct xen_sock *sock);
+#include <mini-os/types.h>
-static int
-xen_bind (struct xen_sock *sock, uint16_t);
-
-static int
-server_allocate_descriptor_page (struct xen_sock *x);
-
-static int
-server_allocate_event_channel (struct xen_sock *x);
-
-static int
-server_allocate_buffer_pages (struct xen_sock *x);
-
-static int
-xen_connect (struct xen_sock *, uint16_t, int);
-
-static int
-client_map_descriptor_page (struct xen_sock *x);
-
-static int
-client_bind_event_channel (struct xen_sock *x);
-
-static int
-client_map_buffer_pages (struct xen_sock *x);
-
-static int
-xen_sendmsg (struct xen_sock *sock, void *data, size_t len);
-
-static inline int
-is_writeable (struct descriptor_page *d);
+/* struct xen_sock:
+ *
+ */
+struct xen_sock {
+ unsigned char is_server, is_client;
+ domid_t otherend_id;
+ struct descriptor_page *descriptor_addr; /* server and client */
+ int descriptor_gref; /* server only */
+ grant_handle_t descriptor_handle; /* client only */
+ unsigned int evtchn_local_port;
+ uint16_t sockid;
+ unsigned int irq;
+ unsigned long buffer_addr; /* server and client */
+ int *buffer_grefs; /* server */
+ grant_handle_t *buffer_handles; /* client */
+ int buffer_order;
+ void (*fn_rx)(unsigned char* data, int len, void *arg);
+ void *fn_rx_arg;
+};
+
+struct descriptor_page {
+ uint32_t server_evtchn_port;
+ int buffer_order; /* num_pages = (1 << buffer_order) */
+ int buffer_first_gref;
+ unsigned int send_offset;
+ unsigned int recv_offset;
+ unsigned int total_bytes_sent;
+ unsigned int total_bytes_received;
+ unsigned int sender_is_blocking;
+ atomic_t avail_bytes;
+ atomic_t sender_has_shutdown;
+ atomic_t force_sender_shutdown;
+};
+
+void initialize_xen_sock (struct xen_sock *);
+int xen_bind (struct xen_sock *, uint16_t, uint16_t);
+int xen_connect (struct xen_sock *, uint16_t, uint16_t);
+int xen_sendmsg (struct xen_sock *, const void *, size_t);
+int xen_recvmsg (struct xen_sock *, void *, size_t);
+int xen_shutdown (struct xen_sock *, int);
+
+void xensock_set_rx_handler(struct xen_sock *dev,
+void (*thenetif_rx)(unsigned char* data, int len, void *arg),
+void *arg);
+int xensock_rx (struct xen_sock *x);
+
+// static int
+// server_allocate_descriptor_page (struct xen_sock *x);
+
+// static int
+// server_allocate_event_channel (struct xen_sock *x);
+
+// static int
+// server_allocate_buffer_pages (struct xen_sock *x);
+
+
+// static int
+// client_map_descriptor_page (struct xen_sock *x);
+
+// static int
+// client_bind_event_channel (struct xen_sock *x);
+
+// static int
+// client_map_buffer_pages (struct xen_sock *x);
+
+
+// static inline int
+// is_writeable (struct descriptor_page *d);
//static long
//send_data_wait (struct sock *sk, long timeo);
-static int
-xen_recvmsg (struct xen_sock *, void *, size_t);
+// static inline int
+// is_readable (struct descriptor_page *d);
-static inline int
-is_readable (struct descriptor_page *d);
-
-static long
-receive_data_wait (struct xen_sock *, long);
+// static long
+// receive_data_wait (struct xen_sock *, long);
/*static irqreturn_t
server_interrupt (int irq, void *dev_id, struct pt_regs *regs); */
@@ -71,23 +104,17 @@ server_interrupt (int irq, void *dev_id, struct pt_regs *regs); */
//static int
// local_memcpy_toiovecend (struct iovec *iov, unsigned char *kdata, int offset, int len);
-static int
-xen_release (struct xen_sock *sock);
-
-static int
-xen_shutdown (struct xen_sock *sock, int how);
-
-static void
-server_unallocate_buffer_pages (struct xen_sock *x);
+// static void
+// server_unallocate_buffer_pages (struct xen_sock *x);
-static void
-server_unallocate_descriptor_page (struct xen_sock *x);
+// static void
+// server_unallocate_descriptor_page (struct xen_sock *x);
-static void
-client_unmap_buffer_pages (struct xen_sock *x);
+// static void
+// client_unmap_buffer_pages (struct xen_sock *x);
-static void
-client_unmap_descriptor_page (struct xen_sock *x);
+// static void
+// client_unmap_descriptor_page (struct xen_sock *x);
/* static int __init
xensocket_init (void);
@@ -97,6 +124,6 @@ xensocket_exit (void); */
-#define xen_sk(__sk) ((struct xen_sock *)__sk)
+// #define xen_sk(__sk) ((struct xen_sock *)__sk)
#endif /* __XENSOCKET_H__ */
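The descriptor_page above is the control block of a single-producer, single-consumer byte ring spanning (1 << buffer_order) granted pages: send_offset and recv_offset chase each other modulo the ring size while avail_bytes tracks free space. A standalone sketch of the wrap-around copy that xen_sendmsg/xen_recvmsg perform, using a hypothetical two-page ring (not part of the patch):

    #include <string.h>

    #define RING_PAGES     2
    #define RING_PAGE_SIZE 4096
    #define RING_MAX       (RING_PAGES * RING_PAGE_SIZE)

    static unsigned char ring[RING_MAX];
    static unsigned int send_offset;
    static unsigned int avail_bytes = RING_MAX;

    /* Copy len bytes into the ring, splitting the copy when it wraps,
     * exactly as the patched xen_sendmsg does. */
    static void ring_example_put(const unsigned char *data, unsigned int len)
    {
        if (send_offset + len > RING_MAX) {
            unsigned int seg1 = RING_MAX - send_offset;
            memcpy(ring + send_offset, data, seg1);
            memcpy(ring, data + seg1, len - seg1);
        } else {
            memcpy(ring + send_offset, data, len);
        }
        send_offset = (send_offset + len) % RING_MAX;
        avail_bytes -= len;   /* atomic_sub(bytes, &d->avail_bytes) in the patch */
    }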
diff --git a/stub.mk b/stub.mk
index 373fa46..b44d5c5 100644
--- a/stub.mk
+++ b/stub.mk
@@ -461,7 +461,7 @@ CFLAGS += -isystem $(GCC_INSTALL)include
CFLAGS += -Wall -Wno-format -Wno-redundant-decls -Wno-undef
CFLAGS += -fno-builtin -fno-stack-protector -fgnu89-inline
-CXXFLAGS += -Wall -Wno-format -Wno-redundant-decls -Wno-strict-aliasing -Wno-undef -Wno-pointer-arith
+CXXFLAGS += -std=gnu++11 -Wall -Wno-format -Wno-redundant-decls -Wno-strict-aliasing -Wno-undef -Wno-pointer-arith
CXXFLAGS += -fno-exceptions -fno-rtti -fpermissive -fno-builtin -fno-stack-protector
CDEFINES += -DHAVE_LIBC
diff --git a/xensocket.c b/xensocket.c
index f4ce035..f65cb3d 100644
--- a/xensocket.c
+++ b/xensocket.c
@@ -21,66 +21,62 @@
//#include <net/tcp_states.h>
//#include <xen/driver_util.h>
-#include <stdint.h>
-#include <mini-os/os.h>
#include <mini-os/errno.h>
-#include <mini-os/types.h>
-#include <mini-os/xmalloc.h>
-#include <mini-os/gnttab.h>
+#include <mini-os/wait.h>
#include <mini-os/events.h>
+#include <mini-os/gnttab.h>
+//#include <mini-os/os.h>
+#include <mini-os/xmalloc.h>
+#include <stdint.h>
#include <mini-os/xensocket.h>
-#define DPRINTK( x, args... ) printk("%s: line %d: " x, \
- __FUNCTION__ , __LINE__ , ## args );
+DECLARE_WAIT_QUEUE_HEAD(rx_queue);
+u_int rx_work_todo = 0;
+
+#define DPRINTK(x, args...) printk("%s: line %d: " x, \
+ __FUNCTION__, __LINE__, ##args);
#define DEBUG
#ifdef DEBUG
#define TRACE_ENTRY printk("Entering %s\n", __func__)
-#define TRACE_EXIT printk("Exiting %s\n", __func__)
+#define TRACE_EXIT printk("Exiting %s\n", __func__)
#else
-#define TRACE_ENTRY do {} while (0)
-#define TRACE_EXIT do {} while (0)
+#define TRACE_ENTRY \
+ do \
+ { \
+ } while (0)
+#define TRACE_EXIT \
+ do \
+ { \
+ } while (0)
#endif
#define TRACE_ERROR printk("Exiting (ERROR) %s\n", __func__)
/* ++++++++++++++++++++++++++++++++++++++++++++ */
-#define CM_SET_GREF 0x01
-#define CM_GET_GREF 0x02
-#define CM_FREE_NODE 0x03
-typedef struct{
- uint16_t remote_domid;
- int shared_page_gref;
-}SHARE_PAGE_GREF;
+#define CM_SET_GREF 0x01
+#define CM_GET_GREF 0x02
+#define CM_FREE_NODE 0x03
+typedef struct
+{
+ uint16_t remote_domid;
+ int shared_page_gref;
+} SHARE_PAGE_GREF;
/* +++++++++++++++++++++++++++++++++++++++++++ */
/************************************************************************
* Data structures for internal recordkeeping and shared memory.
************************************************************************/
-struct descriptor_page {
- uint32_t server_evtchn_port;
- int buffer_order; /* num_pages = (1 << buffer_order) */
- int buffer_first_gref;
- unsigned int send_offset;
- unsigned int recv_offset;
- unsigned int total_bytes_sent;
- unsigned int total_bytes_received;
- unsigned int sender_is_blocking;
- atomic_t avail_bytes;
- atomic_t sender_has_shutdown;
- atomic_t force_sender_shutdown;
-};
-
-static inline void atomic_set(atomic_t *v, int i) {v-> counter = i;}
-static inline int atomic_read(atomic_t *v) {return v-> counter;}
-static inline void atomic_add(int i, atomic_t *v) {v->counter += i;}
-static inline void atomic_sub(int i, atomic_t *v) {v->counter -= i;}
-
-static inline unsigned int min(unsigned int a, unsigned int b) {return a<b?a:b;}
-
- static void
-initialize_descriptor_page (struct descriptor_page *d)
+static inline void atomic_set(atomic_t *v, int i) { v->counter = i; }
+static inline int atomic_read(atomic_t *v) { return v->counter; }
+static inline void atomic_add(int i, atomic_t *v) { v->counter += i; }
+static inline void atomic_sub(int i, atomic_t *v) { v->counter -= i; }
+
+static inline unsigned int min(unsigned int a, unsigned int b) { return a < b ? a : b; }
+
+static void
+initialize_descriptor_page(struct descriptor_page *d)
{
d->server_evtchn_port = -1;
d->buffer_order = -1;
@@ -95,31 +91,16 @@ initialize_descriptor_page (struct descriptor_page *d)
atomic_set(&d->force_sender_shutdown, 0);
}
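A caveat on the atomic_set/atomic_read/atomic_add/atomic_sub helpers above: they are plain loads and stores, which only passes for atomic on a single-vCPU guest; the two domains race on avail_bytes through shared memory. If either endpoint ever runs with more than one vCPU, GCC's atomic builtins are a drop-in replacement. A sketch (assuming atomic_t keeps its counter field; not part of the patch):

    static inline void atomic_set(atomic_t *v, int i)  { __atomic_store_n(&v->counter, i, __ATOMIC_SEQ_CST); }
    static inline int  atomic_read(atomic_t *v)        { return __atomic_load_n(&v->counter, __ATOMIC_SEQ_CST); }
    static inline void atomic_add(int i, atomic_t *v)  { __atomic_fetch_add(&v->counter, i, __ATOMIC_SEQ_CST); }
    static inline void atomic_sub(int i, atomic_t *v)  { __atomic_fetch_sub(&v->counter, i, __ATOMIC_SEQ_CST); }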
-/* struct xen_sock:
- *
- * @sk: this must be the first element in the structure.
- */
-struct xen_sock {
- //struct sock sk;
- unsigned char is_server, is_client;
- domid_t otherend_id;
- struct descriptor_page *descriptor_addr; /* server and client */
- int descriptor_gref; /* server only */
- // struct vm_struct *descriptor_area; /* client only */
- grant_handle_t descriptor_handle; /* client only */
- unsigned int evtchn_local_port;
- unsigned int irq;
- unsigned long buffer_addr; /* server and client */
- int *buffer_grefs; /* server */
- grant_handle_t *buffer_handles; /* client */
- int buffer_order;
-};
+static void server_interrupt(evtchn_port_t, struct pt_regs *, void *);
+static void client_interrupt(evtchn_port_t, struct pt_regs *, void *);
+static int xen_release(struct xen_sock *sock);
-static void
-initialize_xen_sock (struct xen_sock *x) {
+void initialize_xen_sock(struct xen_sock *x)
+{
x->is_server = 0;
x->is_client = 0;
x->otherend_id = -1;
+ x->sockid = -1;
x->descriptor_addr = NULL;
x->descriptor_gref = -ENOSPC;
//x->descriptor_area = NULL;
@@ -131,61 +112,22 @@ initialize_xen_sock (struct xen_sock *x) {
x->buffer_order = -1;
}
-static void server_interrupt (evtchn_port_t, struct pt_regs *, void *);
-static void client_interrupt (evtchn_port_t, struct pt_regs *, void *);
-
-/* static struct proto xen_proto = {
- .name = "XEN",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct xen_sock),
- }; */
-
-/* static const struct proto_ops xen_stream_ops = {
- .family = AF_XEN,
- .owner = THIS_MODULE,
- .release = xen_release,
- .bind = xen_bind,
- .connect = xen_connect,
- .socketpair = sock_no_socketpair,
- .accept = sock_no_accept,
- .getname = sock_no_getname,
- .poll = sock_no_poll,
- .ioctl = sock_no_ioctl,
- .listen = sock_no_listen,
- .shutdown = xen_shutdown,
- .getsockopt = sock_no_getsockopt,
- .setsockopt = sock_no_setsockopt,
- .sendmsg = xen_sendmsg,
- .recvmsg = xen_recvmsg,
- .mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
- }; */
-
-/* static struct net_proto_family xen_family_ops = {
- .family = AF_XEN,
- .create = xen_create,
- .owner = THIS_MODULE,
- }; */
-
-
/* TODO make this simpler */
-static int
-xen_shutdown (struct xen_sock *x, int how) {
- //struct sock *sk = sock->sk;
- //struct xen_sock *x;
+int xen_shutdown(struct xen_sock *x, int how)
+{
struct descriptor_page *d;
- SHARE_PAGE_GREF hypercall_arg;
- //x = xen_sk(sk);
+ // SHARE_PAGE_GREF hypercall_arg;
d = x->descriptor_addr;
- if (x->is_server) {
+ if (x->is_server)
+ {
/* +++++++++++++++++++++++++++++++++++++++++++ */
- hypercall_arg.remote_domid = x->otherend_id;
- hypercall_arg.shared_page_gref = -1;
- if ( _hypercall2(long, myhpcall_gref_handler, CM_FREE_NODE, &hypercall_arg) )
- {
- DPRINTK("ERR: free node failed.\n");
- }
+ // hypercall_arg.remote_domid = x->otherend_id;
+ // hypercall_arg.shared_page_gref = -1;
+ // if ( _hypercall2(long, myhpcall_gref_handler, CM_FREE_NODE, &hypercall_arg) )
+ // {
+ // DPRINTK("ERR: free node failed.\n");
+ // }
/* +++++++++++++++++++++++++++++++++++++++++++ */
atomic_set(&d->force_sender_shutdown, 1);
}
@@ -193,80 +135,200 @@ xen_shutdown (struct xen_sock *x, int how) {
return xen_release(x);
}
-/************************************************************************
- * Server-side connection setup functions.
- ************************************************************************/
+static void
+server_unallocate_buffer_pages(struct xen_sock *x)
+{
+ if (x->buffer_grefs)
+ {
+ int buffer_num_pages = (1 << x->buffer_order);
+ int i;
-/* In our nonstandard use of the bind function, the return value is the
- * grant table entry of the descriptor page.
- */
-static int
-xen_bind (struct xen_sock *x, uint16_t remote_domid) {
- int rc = -EINVAL;
- SHARE_PAGE_GREF hypercall_arg;
- TRACE_ENTRY;
+ for (i = 0; i < buffer_num_pages; i++)
+ {
+ if (x->buffer_grefs[i] == -ENOSPC)
+ {
+ break;
+ }
- /* Ensure that bind() is only called once for this socket.
- */
- if (x->is_server) {
- DPRINTK("error: cannot call bind() more than once on a socket\n");
- goto err;
- }
- if (x->is_client) {
- DPRINTK("error: cannot call both bind() and connect() on the same socket\n");
- goto err;
+ gnttab_end_access(x->buffer_grefs[i]);
+ x->buffer_grefs[i] = -ENOSPC;
+ }
+
+ free(x->buffer_grefs);
+ x->buffer_grefs = NULL;
}
- x->is_server = 1;
- x->otherend_id = remote_domid;
- if ((rc = server_allocate_descriptor_page(x)) != 0) {
- goto err;
+
+ if (x->buffer_addr)
+ {
+ struct descriptor_page *d = x->descriptor_addr;
+
+ free_pages((void *)x->buffer_addr, x->buffer_order);
+ x->buffer_addr = 0;
+ x->buffer_order = -1;
+ if (d)
+ {
+ d->buffer_order = -1;
+ }
}
+}
- if ((rc = server_allocate_event_channel(x)) != 0) {
- goto err;
+
+
+
+static void
+server_unallocate_descriptor_page(struct xen_sock *x)
+{
+ if (x->descriptor_gref != -ENOSPC)
+ {
+ gnttab_end_access(x->descriptor_gref);
+ x->descriptor_gref = -ENOSPC;
}
+ if (x->descriptor_addr)
+ {
+ free_page((void *)(x->descriptor_addr));
+ x->descriptor_addr = NULL;
+ }
+}
- if ((rc = server_allocate_buffer_pages(x)) != 0) {
- goto err;
+static void
+client_unmap_buffer_pages(struct xen_sock *x)
+{
+
+ if (x->buffer_handles)
+ {
+ struct descriptor_page *d = x->descriptor_addr;
+ int buffer_order = d->buffer_order;
+ int buffer_num_pages = (1 << buffer_order);
+ int i;
+ struct gnttab_unmap_grant_ref op;
+ int rc = 0;
+
+ for (i = 0; i < buffer_num_pages; i++)
+ {
+ if (x->buffer_handles[i] == -1)
+ {
+ break;
+ }
+
+ memset(&op, 0, sizeof(op));
+ op.host_addr = x->buffer_addr + i * PAGE_SIZE;
+ op.handle = x->buffer_handles[i];
+ op.dev_bus_addr = 0;
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
+ if (rc == -ENOSYS)
+ {
+ printk("Failure to unmap grant reference \n");
+ }
+ }
+
+ free(x->buffer_handles);
+ x->buffer_handles = NULL;
}
+}
- /* A successful function exit returns the grant table reference. */
- hypercall_arg.remote_domid = x->otherend_id;
- hypercall_arg.shared_page_gref = x->descriptor_gref;
- if ( _hypercall2(long, myhpcall_gref_handler, CM_SET_GREF, &hypercall_arg) )
+static void
+client_unmap_descriptor_page(struct xen_sock *x)
+{
+ struct descriptor_page *d;
+ int rc = 0;
+
+ d = x->descriptor_addr;
+
+ if (x->descriptor_handle != -1)
{
- DPRINTK("ERR: set gref failed.\n");
+ struct gnttab_unmap_grant_ref op;
+
+ memset(&op, 0, sizeof(op));
+ op.host_addr = (unsigned long)x->descriptor_addr;
+ op.handle = x->descriptor_handle;
+ op.dev_bus_addr = 0;
+
+ atomic_set(&d->sender_has_shutdown, 1);
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
+ if (rc == -ENOSYS)
+ {
+ printk("Failure to unmap grant reference for descriptor page\n");
+ }
+
+ x->descriptor_handle = -1;
}
- TRACE_EXIT;
- return x->descriptor_gref;
-err:
- TRACE_ERROR;
- return rc;
}
+void hexdump(void* data, size_t size) {
+ char ascii[17];
+ size_t i, j;
+ ascii[16] = '\0';
+ for (i = 0; i < size; ++i) {
+ printf("%02X ", ((unsigned char*)data)[i]);
+ if (((unsigned char*)data)[i] >= ' ' && ((unsigned char*)data)[i] <= '~') {
+ ascii[i % 16] = ((unsigned char*)data)[i];
+ } else {
+ ascii[i % 16] = '.';
+ }
+ if ((i+1) % 8 == 0 || i+1 == size) {
+ printf(" ");
+ if ((i+1) % 16 == 0) {
+ printf("| %s \n", ascii);
+ } else if (i+1 == size) {
+ ascii[(i+1) % 16] = '\0';
+ if ((i+1) % 16 <= 8) {
+ printf(" ");
+ }
+ for (j = (i+1) % 16; j < 16; ++j) {
+ printf(" ");
+ }
+ printf("| %s \n", ascii);
+ }
+ }
+ }
+}
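The hexdump helper added above prints the classic 16-bytes-per-row dump with an ASCII gutter, handy for eyeballing what actually landed in the shared ring. A hypothetical use (x being a connected struct xen_sock *; not part of the patch):

    uint8_t pkt[64];
    if (xen_recvmsg(x, pkt, sizeof(pkt)) > 0)
        hexdump(pkt, sizeof(pkt));   /* inspect the received bytes */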
+
+
static int
-server_allocate_descriptor_page (struct xen_sock *x) {
+server_allocate_descriptor_page(struct xen_sock *x)
+{
+ char gref_desc_path[256], gref_str[256], *ret;
TRACE_ENTRY;
- if (x->descriptor_addr) {
+ if (x->descriptor_addr)
+ {
DPRINTK("error: already allocated server descriptor page\n");
goto err;
}
if (!(x->descriptor_addr =
- (struct descriptor_page *)alloc_page())) {
+ (struct descriptor_page *)alloc_page()))
+ {
DPRINTK("error: cannot allocate free page\n");
goto err_unalloc;
}
initialize_descriptor_page(x->descriptor_addr);
x->descriptor_gref = gnttab_grant_access(x->otherend_id,
- virt_to_mfn(x->descriptor_addr), 0);
- if (x->descriptor_gref == -ENOSPC) {
+ virt_to_mfn(x->descriptor_addr), 0);
+
+ if (x->descriptor_gref < 0)
+ {
DPRINTK("error: cannot share descriptor page %p\n", x->descriptor_addr);
goto err_unalloc;
}
+ BUG_ON(x->descriptor_gref == GRANT_INVALID_REF);
+
+ sprintf(gref_desc_path, "/local/domain/%d/data/socket/%d/gref", xenbus_get_self_id(), x->sockid);
+ sprintf(gref_str, "%d", x->descriptor_gref );
+ if ((ret = xenbus_write(XBT_NIL, gref_desc_path, gref_str)) != NULL)
+ printk("XS ERROR:%s\n", ret);
+ else
+ printk("wrote details %s at %s\n", gref_desc_path, gref_str);
+
+ printk("grant access to %d from %d with ref %s (%d)\n", x->otherend_id,
+ xenbus_get_self_id(), gref_str, x->descriptor_gref );
+
+ if ((ret = xenbus_set_perms(XBT_NIL, gref_desc_path, x->otherend_id, 'r')) != NULL)
+ printk("XS ERROR:%s\n", ret);
+
TRACE_EXIT;
return 0;
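This is the new rendezvous: the server publishes the descriptor gref at /local/domain/<self>/data/socket/<sockid>/gref and grants the peer domain read permission, replacing the old myhpcall_gref_handler hypercall. Nothing in the patch removes that node once xen_shutdown runs; a plausible cleanup, a sketch only (it assumes Mini-OS's xenbus_rm and is not part of the patch), would be:

    char path[256], *err;
    sprintf(path, "/local/domain/%d/data/socket/%d/gref",
            xenbus_get_self_id(), x->sockid);
    if ((err = xenbus_rm(XBT_NIL, path)) != NULL)   /* drop the stale gref node */
        printk("XS ERROR:%s\n", err);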
@@ -280,21 +342,23 @@ err:
}
static int
-server_allocate_event_channel (struct xen_sock *x) {
+server_allocate_event_channel(struct xen_sock *x)
+{
evtchn_port_t port;
- int rc;
+ int rc;
TRACE_ENTRY;
rc = evtchn_alloc_unbound(x->otherend_id, server_interrupt,
- x, &port);
- if ( rc != 0) {
+ x, &port);
+ if (rc != 0)
+ {
DPRINTK("Unable to allocate event channel\n");
goto err;
-
}
x->evtchn_local_port = port;
x->descriptor_addr->server_evtchn_port = x->evtchn_local_port;
+ printk("Server port is %d\n", x->descriptor_addr->server_evtchn_port);
TRACE_EXIT;
return 0;
@@ -305,50 +369,60 @@ err:
}
static int
-server_allocate_buffer_pages (struct xen_sock *x) {
+server_allocate_buffer_pages(struct xen_sock *x)
+{
struct descriptor_page *d = x->descriptor_addr;
- int buffer_num_pages;
- int i;
+ int buffer_num_pages;
+ int i;
TRACE_ENTRY;
- if (!d) {
+ if (!d)
+ {
/* must call server_allocate_descriptor_page first */
DPRINTK("error: descriptor page not yet allocated\n");
goto err;
}
- if (x->buffer_addr) {
+ if (x->buffer_addr)
+ {
DPRINTK("error: already allocated server buffer pages\n");
goto err;
}
- x->buffer_order = 5; //32 pages /* you can change this as desired */
+ x->buffer_order = 5; //32 pages /* you can change this as desired */
buffer_num_pages = (1 << x->buffer_order);
- if (!(x->buffer_addr = alloc_pages(x->buffer_order))) {
+ if (!(x->buffer_addr = alloc_pages(x->buffer_order)))
+ {
DPRINTK("error: cannot allocate %d pages\n", buffer_num_pages);
goto err;
}
- if (!(x->buffer_grefs = malloc(buffer_num_pages * sizeof(int)))) {
+ if (!(x->buffer_grefs = malloc(buffer_num_pages * sizeof(int))))
+ {
DPRINTK("error: unexpected memory allocation failure\n");
goto err_unallocate;
}
- else {
+ else
+ {
/* Success, so first invalidate all the entries */
- for (i = 0; i < buffer_num_pages; i++) {
+ for (i = 0; i < buffer_num_pages; i++)
+ {
x->buffer_grefs[i] = -ENOSPC;
}
}
- printk("x->buffer_addr = %lx PAGE_SIZE = %li buffer_num_pages = %d\n", \
- x->buffer_addr, PAGE_SIZE, buffer_num_pages);
- for (i = 0; i < buffer_num_pages; i++) {
+ printk("x->buffer_addr = %lx PAGE_SIZE = %li buffer_num_pages = %d\n",
+ x->buffer_addr, PAGE_SIZE, buffer_num_pages);
+ for (i = 0; i < buffer_num_pages; i++)
+ {
x->buffer_grefs[i] =
gnttab_grant_access(x->otherend_id,
- virt_to_mfn(x->buffer_addr + i * PAGE_SIZE), 0);
- if (x->buffer_grefs[i] == -ENOSPC) {
+ virt_to_mfn(x->buffer_addr + i * PAGE_SIZE), 0);
+ printk("Allocating ref %d (%d)\n", x->buffer_grefs[i], i);
+ if (x->buffer_grefs[i] == -ENOSPC)
+ {
DPRINTK("error: cannot share buffer page #%d\n", i);
goto err_unallocate;
}
@@ -360,8 +434,9 @@ server_allocate_buffer_pages (struct xen_sock *x) {
*/
d->buffer_first_gref = x->buffer_grefs[0];
- for (i = 1; i < buffer_num_pages; i++) {
- int *next_gref = (int *)(x->buffer_addr + (i-1) * PAGE_SIZE);
+ for (i = 1; i < buffer_num_pages; i++)
+ {
+ int *next_gref = (int *)(x->buffer_addr + (i - 1) * PAGE_SIZE);
*next_gref = x->buffer_grefs[i];
}
@@ -380,107 +455,94 @@ err:
}
/************************************************************************
- * Client-side connection setup functions.
+ * Server-side connection setup functions.
************************************************************************/
-static int
-xen_connect (struct xen_sock *x, uint16_t remote_domid, int shared_page_gref) {
- int rc = -EINVAL;
- SHARE_PAGE_GREF hypercall_arg;
+/* In our nonstandard use of the bind function, the return value is the
+ * grant table entry of the descriptor page.
+ */
+int xen_bind(struct xen_sock *x, uint16_t remote_domid, uint16_t sockid)
+{
+ int rc = -EINVAL;
+ // SHARE_PAGE_GREF hypercall_arg;
TRACE_ENTRY;
- /* Ensure that connect() is only called once for this socket.
- */
- if (x->is_client) {
- DPRINTK("error: cannot call connect() more than once on a socket\n");
+ /* Ensure that bind() is only called once for this socket.
+ */
+ if (x->is_server)
+ {
+ DPRINTK("error: cannot call bind() more than once on a socket\n");
goto err;
}
- if (x->is_server) {
+ if (x->is_client)
+ {
DPRINTK("error: cannot call both bind() and connect() on the same socket\n");
goto err;
}
- x->is_client = 1;
-
+ x->is_server = 1;
x->otherend_id = remote_domid;
- x->descriptor_gref = shared_page_gref;
-
- /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
- if( x->descriptor_gref <=0 )
+ x->sockid = sockid;
+ if ((rc = server_allocate_descriptor_page(x)) != 0)
{
- DPRINTK("####get gref by hypercall.\n");
- hypercall_arg.remote_domid = remote_domid;
- hypercall_arg.shared_page_gref = -1;
- if ( _hypercall2(long, myhpcall_gref_handler, CM_GET_GREF, &hypercall_arg) )
- {
- DPRINTK("ERR: get gref failed.\n");
- goto err;
- }
- x->descriptor_gref = hypercall_arg.shared_page_gref;
- DPRINTK("shared_page_gref = %d.\n", x->descriptor_gref);
- }
- /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-
- if ((rc = client_map_descriptor_page(x)) != 0) {
goto err;
}
- if ((rc = client_bind_event_channel(x)) != 0) {
- goto err_unmap_descriptor;
+ if ((rc = server_allocate_event_channel(x)) != 0)
+ {
+ goto err;
}
- if ((rc = client_map_buffer_pages(x)) != 0) {
- goto err_unmap_buffer;
+ if ((rc = server_allocate_buffer_pages(x)) != 0)
+ {
+ goto err;
}
-
TRACE_EXIT;
- return 0;
-
-err_unmap_buffer:
- client_unmap_buffer_pages(x);
-
-err_unmap_descriptor:
- client_unmap_descriptor_page(x);
- notify_remote_via_evtchn(x->evtchn_local_port);
+ return x->descriptor_gref;
err:
+ TRACE_ERROR;
return rc;
}
static int
-client_map_descriptor_page (struct xen_sock *x) {
+client_map_descriptor_page(struct xen_sock *x)
+{
struct gnttab_map_grant_ref op;
- int rc = -ENOMEM;
+ int rc = -ENOMEM;
TRACE_ENTRY;
- if (x->descriptor_addr) {
+ if (x->descriptor_addr)
+ {
DPRINTK("error: already allocated client descriptor page\n");
goto err;
}
- if ((x->descriptor_addr = (struct descriptor_page *)alloc_page()) == NULL) {
+ // Note: do not write on the shared memory before the hypercall,
+ // as the memory will be dirtied and never shared.
+ if ((x->descriptor_addr = (struct descriptor_page *)alloc_page()) == NULL)
+ {
DPRINTK("error: cannot allocate memory for descriptor page\n");
goto err;
}
-
- memset(&op, 0, sizeof(op));
- op.host_addr = (uint64_t)x->descriptor_addr;
+ op.host_addr = (uint64_t)(x->descriptor_addr);
op.flags = GNTMAP_host_map;
- op.ref = x->descriptor_gref;
- op.dom = x->otherend_id;
+ op.ref = (grant_ref_t)(x->descriptor_gref);
+ op.dom = (domid_t)(x->otherend_id);
+ printk("mapping access to %d from %d with ref %d\n", op.dom,
+ xenbus_get_self_id(), op.ref );
rc = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
- if (rc == -ENOSYS) {
- goto err_unmap;
- }
-
- if (op.status) {
- DPRINTK("error: grant table mapping operation failed\n");
+ if ((rc != 0) || (op.status != GNTST_okay))
+ {
+ DPRINTK("error: grant table op call failed\n");
goto err_unmap;
}
-
x->descriptor_handle = op.handle;
+ printk("port number %d, order %d\n", x->descriptor_addr->server_evtchn_port,
+ x->descriptor_addr->buffer_order);
+
TRACE_EXIT;
return 0;
@@ -493,24 +555,26 @@ err:
}
static int
-client_bind_event_channel (struct xen_sock *x) {
- int rc;
+client_bind_event_channel(struct xen_sock *x)
+{
+ int rc;
evtchn_port_t port;
TRACE_ENTRY;
/* Start by binding this end of the event channel to the other
* end of the event channel. */
- if((rc=evtchn_bind_interdomain(x->otherend_id,
- x->descriptor_addr->server_evtchn_port,
- client_interrupt, x, &port)) != 0) {
+ DPRINTK("Other port is %d\n", x->descriptor_addr->server_evtchn_port);
+ if ((rc = evtchn_bind_interdomain(x->otherend_id,
+ x->descriptor_addr->server_evtchn_port,
+ client_interrupt, x, &port)) != 0)
+ {
DPRINTK("Unable to bind to sender's event channel\n");
goto err;
}
x->evtchn_local_port = port;
- DPRINTK("Other port is %d\n", x->descriptor_addr->server_evtchn_port);
DPRINTK("My port is %d\n", port);
/* Next bind this end of the event channel to our local callback
@@ -526,23 +590,26 @@ err:
}
static int
-client_map_buffer_pages (struct xen_sock *x) {
+client_map_buffer_pages(struct xen_sock *x)
+{
struct descriptor_page *d = x->descriptor_addr;
- int buffer_num_pages;
- int *grefp;
- int i;
+ int buffer_num_pages;
+ int grefp;
+ int i;
struct gnttab_map_grant_ref op;
- int rc = -ENOMEM;
+ int rc = -ENOMEM;
TRACE_ENTRY;
- if (!d) {
+ if (!d)
+ {
/* must call client_map_descriptor_page first */
DPRINTK("error: descriptor page not yet mapped\n");
goto err;
}
- if (d->buffer_order == -1) {
+ if (d->buffer_order == -1)
+ {
DPRINTK("error: server has not yet allocated buffer pages\n");
goto err;
}
@@ -550,42 +617,51 @@ client_map_buffer_pages (struct xen_sock *x) {
x->buffer_order = d->buffer_order;
buffer_num_pages = (1 << x->buffer_order);
- if (!(x->buffer_handles = malloc(buffer_num_pages * sizeof(grant_handle_t)))) {
+ if (!(x->buffer_handles = malloc(buffer_num_pages * sizeof(grant_handle_t))))
+ {
DPRINTK("error: unexpected memory allocation failure\n");
goto err;
}
- else {
- for (i = 0; i < buffer_num_pages; i++) {
+ else
+ {
+ for (i = 0; i < buffer_num_pages; i++)
+ {
x->buffer_handles[i] = -1;
}
}
// this might be rather incorrect.
- if (!(x->buffer_addr = (unsigned long)alloc_pages(buffer_num_pages))) {
+ if (!(x->buffer_addr = (unsigned long)alloc_pages(d->buffer_order )))
+ {
DPRINTK("error: cannot allocate %d buffer pages\n", buffer_num_pages);
goto err_unmap;
}
- grefp = &d->buffer_first_gref;
- for (i = 0; i < buffer_num_pages; i++) {
+ grefp = d->buffer_first_gref;
+ for (i = 0; i < buffer_num_pages; i++)
+ {
memset(&op, 0, sizeof(op));
op.host_addr = x->buffer_addr + i * PAGE_SIZE;
op.flags = GNTMAP_host_map;
- op.ref = *grefp;
+ op.ref = (grefp - i);
op.dom = x->otherend_id;
+
+ printk("Allocating ref %d (%d)\n", grefp - i, i);
rc = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
- if (rc == -ENOSYS) {
+ if (rc == -ENOSYS)
+ {
goto err_unmap;
}
- if (op.status) {
+ if (op.status)
+ {
DPRINTK("error: grant table mapping failed\n");
goto err_unmap;
}
x->buffer_handles[i] = op.handle;
- grefp = (int *)(x->buffer_addr + i * PAGE_SIZE);
+ //grefp = (int *)(x->buffer_addr + i * PAGE_SIZE);
}
TRACE_EXIT;
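Note the asymmetry this hunk introduces: the server (earlier in the patch) still threads the chain of grant refs through the buffer pages, writing each next gref at the start of the previous page, while the client now assumes the refs are consecutive and descending (op.ref = grefp - i) and leaves the chain-following line commented out. That only holds if gnttab_grant_access happens to hand out refs in that order. A hedged sketch of the more robust chain walk, reconstructed from the pre-patch logic (not part of the patch):

    int gref = d->buffer_first_gref;
    for (i = 0; i < buffer_num_pages; i++)
    {
        memset(&op, 0, sizeof(op));
        op.host_addr = x->buffer_addr + i * PAGE_SIZE;
        op.flags = GNTMAP_host_map;
        op.ref = gref;
        op.dom = x->otherend_id;
        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) || op.status)
            goto err_unmap;
        x->buffer_handles[i] = op.handle;
        /* the server stored the next page's gref at the start of this page */
        gref = *(int *)(x->buffer_addr + i * PAGE_SIZE);
    }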
@@ -600,26 +676,92 @@ err:
}
/************************************************************************
+ * Client-side connection setup functions.
+ ************************************************************************/
+
+int xen_connect(struct xen_sock *x, uint16_t remote_domid, uint16_t sockid)
+{
+ int rc = -EINVAL;
+ char gref_desc_path[256], *gref_str, *ret;
+ //SHARE_PAGE_GREF hypercall_arg;
+ TRACE_ENTRY;
+ /* Ensure that connect() is only called once for this socket.
+ */
+
+ if (x->is_client)
+ {
+ DPRINTK("error: cannot call connect() more than once on a socket\n");
+ goto err;
+ }
+ if (x->is_server)
+ {
+ DPRINTK("error: cannot call both bind() and connect() on the same socket\n");
+ goto err;
+ }
+ x->is_client = 1;
+
+ x->otherend_id = remote_domid;
+ x->sockid = sockid;
+
+ sprintf(gref_desc_path, "/local/domain/%d/data/socket/%d/gref",
+ remote_domid, x->sockid);
+ if ((ret = xenbus_read(XBT_NIL, gref_desc_path, &gref_str)) != NULL)
+ printk("XS ERROR:%s\n", ret);
+
+ x->descriptor_gref = atoi(gref_str);
+ printk("Read details %s at %d\n", gref_desc_path, x->descriptor_gref);
+
+ if ((rc = client_map_descriptor_page(x)) != 0)
+ {
+ goto err;
+ }
+
+ if ((rc = client_bind_event_channel(x)) != 0)
+ {
+ goto err_unmap_descriptor;
+ }
+
+ if ((rc = client_map_buffer_pages(x)) != 0)
+ {
+ goto err_unmap_buffer;
+ }
+
+ TRACE_EXIT;
+ return 0;
+
+err_unmap_buffer:
+ client_unmap_buffer_pages(x);
+
+err_unmap_descriptor:
+ client_unmap_descriptor_page(x);
+ notify_remote_via_evtchn(x->evtchn_local_port);
+
+err:
+ return rc;
+}
+
+/************************************************************************
* Data transmission functions (client-only in a one-way communication
* channel).
************************************************************************/
-static int
-xen_sendmsg (struct xen_sock *x, void *data, size_t len) {
+int xen_sendmsg(struct xen_sock *x, const void *data, size_t len)
+{
// int rc = -EINVAL;
struct descriptor_page *d = x->descriptor_addr;
- unsigned int max_offset = (1 << x->buffer_order) * PAGE_SIZE;
+ unsigned int max_offset = (1 << x->buffer_order) * PAGE_SIZE;
// long timeo;
- unsigned int copied = 0;
+ unsigned int copied = 0;
+ len +=4;
- TRACE_ENTRY;
-
- while (copied < len) {
+ while (copied < len )
+ {
unsigned int send_offset = d->send_offset;
unsigned int avail_bytes = atomic_read(&d->avail_bytes);
unsigned int bytes;
- if (atomic_read(&d->force_sender_shutdown) != 0) {
+ if (atomic_read(&d->force_sender_shutdown) != 0)
+ {
xen_release(x);
goto err;
}
@@ -639,21 +781,26 @@ xen_sendmsg (struct xen_sock *x, void *data, size_t len) {
continue;
}*/
- if ((send_offset + bytes) > max_offset) {
+ if ((send_offset + bytes) > max_offset)
+ {
/* wrap around, need to copy twice */
unsigned int bytes_segment1 = max_offset - send_offset;
unsigned int bytes_segment2 = bytes - bytes_segment1;
- /* TODO this may be wrong */
+ /* TODO this is wrong*/
memcpy((unsigned char *)(x->buffer_addr + send_offset),
- (data + copied), bytes_segment1);
+ (data + copied), bytes_segment1);
memcpy((unsigned char *)(x->buffer_addr),
- (data + copied + bytes_segment1), bytes_segment2);
+ (data + copied + bytes_segment1), bytes_segment2);
}
- else {
+ else
+ {
/* no need to wrap around */
memcpy((unsigned char *)(x->buffer_addr + send_offset),
- (data + copied), bytes);
+ &len, sizeof(len));
+ memcpy((unsigned char *)(x->buffer_addr + send_offset + sizeof(len)),
+ (data + copied), bytes);
+ printk("sending %d bytes\n", len);
}
/* Update values */
@@ -665,7 +812,6 @@ xen_sendmsg (struct xen_sock *x, void *data, size_t len) {
notify_remote_via_evtchn(x->evtchn_local_port);
- TRACE_EXIT;
return copied;
err:
@@ -673,51 +819,18 @@ err:
return copied;
}
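Two caveats in the send path above, which the remaining TODO hints at: the wrap-around branch never writes the length header at all, and the non-wrap branch writes sizeof(len) bytes of a size_t (8 on x86_64) while the reader, xensock_rx further down, consumes a 4-byte uint32_t. A hedged sketch of consistent framing (ring_put is a hypothetical wrap-aware copy like the two-memcpy logic above, writing n bytes at ring offset off; not part of the patch):

    unsigned int off = d->send_offset;
    uint32_t hdr = (uint32_t)len;              /* payload length, header excluded */
    ring_put(x, off, (unsigned char *)&hdr, sizeof(hdr));                  /* header */
    ring_put(x, (off + sizeof(hdr)) % max_offset,
             (const unsigned char *)data, len);                           /* payload */
    d->send_offset = (off + sizeof(hdr) + len) % max_offset;
    d->total_bytes_sent += sizeof(hdr) + len;
    atomic_sub(sizeof(hdr) + len, &d->avail_bytes);
    notify_remote_via_evtchn(x->evtchn_local_port);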
-static inline int
-is_writeable (struct descriptor_page *d) {
- unsigned int avail_bytes = atomic_read(&d->avail_bytes);
- if (avail_bytes > 0)
- return 1;
-
- return 0;
-}
+// static inline int
+// is_writeable (struct descriptor_page *d) {
+// unsigned int avail_bytes = atomic_read(&d->avail_bytes);
+// if (avail_bytes > 0)
+// return 1;
-/* static long
- send_data_wait (struct xen_sock *x, long timeo) {
- struct descriptor_page *d = x->descriptor_addr;
- DEFINE_WAIT(wait);
-
- TRACE_ENTRY;
-
- d->sender_is_blocking = 1;
- notify_remote_via_evtchn(x->evtchn_local_port);
-
- for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
-
- if (is_writeable(d)
- || !skb_queue_empty(&sk->sk_receive_queue)
- || sk->sk_err
- || (sk->sk_shutdown & RCV_SHUTDOWN)
- || signal_pending(current)
- || !timeo
- || atomic_read(&d->force_sender_shutdown)) {
- break;
- }
-
- timeo = schedule_timeout(timeo);
- }
-
- d->sender_is_blocking = 0;
-
- finish_wait(sk->sk_sleep, &wait);
-
- TRACE_EXIT;
- return timeo;
- }*/
+// return 0;
+// }
static void
-client_interrupt (evtchn_port_t irq, struct pt_regs *regs, void *dev_id) {
+client_interrupt(evtchn_port_t irq, struct pt_regs *regs, void *dev_id)
+{
// struct xen_sock *x = dev_id;
TRACE_ENTRY;
// TODO FIXME what happens when the interrupt fires?
@@ -732,60 +845,67 @@ client_interrupt (evtchn_port_t irq, struct pt_regs *regs, void *dev_id) {
* channel, but common to both in a two-way channel).
***********************************************************************/
-static int
-xen_recvmsg (struct xen_sock *x, void *data, size_t size) {
+int xen_recvmsg(struct xen_sock *x, void *data, size_t size)
+{
struct descriptor_page *d = x->descriptor_addr;
- unsigned int max_offset = (1 << x->buffer_order) * PAGE_SIZE;
- int copied = 0;
- int target;
- long timeo;
+ unsigned int max_offset = (1 << x->buffer_order) * PAGE_SIZE;
+ int copied = 0;
+ //int target;
+ //long timeo;
TRACE_ENTRY;
- while (copied < size) {
+ while (copied < size)
+ {
unsigned int recv_offset = d->recv_offset;
unsigned int bytes;
- unsigned int avail_bytes = max_offset - atomic_read(&d->avail_bytes); /* bytes available for read */
+ unsigned int avail_bytes = max_offset - atomic_read(&d->avail_bytes); /* bytes available for read */
/* Determine the maximum amount that can be read */
bytes = min(size - copied, avail_bytes);
- if (atomic_read(&d->sender_has_shutdown) != 0) {
- if (avail_bytes == 0) {
+ if (atomic_read(&d->sender_has_shutdown) != 0)
+ {
+ if (avail_bytes == 0)
+ {
copied = 0;
break;
}
}
/* Block if the buffer is empty */
- if (bytes == 0) {
- if (copied > target) {
- break;
- }
+ if (bytes == 0)
+ {
+ // if (copied > target) {
+ // break;
+ // }
- timeo = receive_data_wait(x, timeo);
+ //timeo = receive_data_wait(x, timeo);
/* if (signal_pending(current)) {
rc = sock_intr_errno(timeo);
DPRINTK("error: signal\n");
goto err;
}
continue; */
+ continue;
}
/* Perform the read */
- if ((recv_offset + bytes) > max_offset) {
+ if ((recv_offset + bytes) > max_offset)
+ {
/* wrap around, need to perform the read twice */
unsigned int bytes_segment1 = max_offset - recv_offset;
unsigned int bytes_segment2 = bytes - bytes_segment1;
- memcpy(data+copied, (unsigned char *)(x->buffer_addr + recv_offset),
- bytes_segment1);
- memcpy(data+copied+bytes_segment1, (unsigned char *)(x->buffer_addr),
- bytes_segment2);
+ memcpy(data + copied, (unsigned char *)(x->buffer_addr + recv_offset),
+ bytes_segment1);
+ memcpy(data + copied + bytes_segment1, (unsigned char *)(x->buffer_addr),
+ bytes_segment2);
}
- else {
+ else
+ {
/* no wrap around, proceed with one copy */
- memcpy(data+copied, (unsigned char *)(x->buffer_addr + recv_offset),
- bytes);
+ memcpy(data + copied, (unsigned char *)(x->buffer_addr + recv_offset),
+ bytes);
}
/* Update values */
@@ -793,7 +913,8 @@ xen_recvmsg (struct xen_sock *x, void *data, size_t size) {
d->recv_offset = (recv_offset + bytes) % max_offset;
d->total_bytes_received += bytes;
atomic_add(bytes, &d->avail_bytes);
- if (d->sender_is_blocking) {
+ if (d->sender_is_blocking)
+ {
notify_remote_via_evtchn(x->evtchn_local_port);
}
}
@@ -802,92 +923,129 @@ xen_recvmsg (struct xen_sock *x, void *data, size_t size) {
return copied;
}
-static inline int
-is_readable (struct descriptor_page *d) {
- unsigned int max_offset = (1 << d->buffer_order) * PAGE_SIZE;
- unsigned int avail_bytes = max_offset - atomic_read(&d->avail_bytes);
- if (avail_bytes > 0)
- return 1;
+/************************************************************************
+ * Data reception functions (server-only in a one-way communication
+ * channel, but common to both in a two-way channel).
+ ***********************************************************************/
- return 0;
+
+void inline read_data(struct xen_sock *x, void *data, size_t len)
+{
+ struct descriptor_page *d = x->descriptor_addr;
+ unsigned int max_offset = (1 << x->buffer_order) * PAGE_SIZE;
+
+ unsigned int recv_offset = d->recv_offset;
+ if ((recv_offset + len) > max_offset)
+ {
+ /* wrap around, need to perform the read twice */
+ unsigned int bytes_segment1 = max_offset - recv_offset;
+ unsigned int bytes_segment2 = len - bytes_segment1;
+ memcpy(data,
+ (unsigned char *)(x->buffer_addr + recv_offset),
+ bytes_segment1);
+ memcpy(data + bytes_segment1,
+ (unsigned char *)(x->buffer_addr),
+ bytes_segment2);
+ }
+ else
+ {
+ /* no wrap around, proceed with one copy */
+ memcpy(data, (unsigned char *)(x->buffer_addr + recv_offset),
+ len);
+ }
}
-static long
-receive_data_wait (struct xen_sock *x, long timeo) {
+int xensock_rx(struct xen_sock *x)
+{
struct descriptor_page *d = x->descriptor_addr;
-// DEFINE_WAIT(wait);
+ int copied = 0;
+ unsigned int max_offset = (1 << x->buffer_order) * PAGE_SIZE;
+ unsigned int recv_offset = d->recv_offset;
+ uint32_t bytes, bytes_len = sizeof(bytes);
+ uint8_t pkt[4000];
- TRACE_ENTRY;
+ if (!rx_work_todo)
+ {
+ int64_t deadline = NOW() + MICROSECS(CONFIG_NETFRONT_POLLTIMEOUT);
+ for (;;)
+ {
+ wait_event_deadline(rx_queue, rx_work_todo > 0, deadline);
+ if (rx_work_todo || (deadline && NOW() >= deadline))
+ {
+ break;
+ }
+ }
+ }
+
+ unsigned int avail_bytes = max_offset - atomic_read(&d->avail_bytes);
+ /* bytes available for read */
+ while (avail_bytes > 4)
+ {
+
+ printk("Reading %d data\n", avail_bytes);
+ /* Determine the maximum amount that can be read */
+ //bytes = min(size - copied, avail_bytes);
+
+ if (atomic_read(&d->sender_has_shutdown) != 0)
+ {
+ if (avail_bytes == 0)
+ {
+ copied = 0;
+ break;
+ }
+ }
+
+ /* Perform the read of the packet size */
- for (;;) {
-/* prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
- if (is_readable(d)
- || (atomic_read(&d->sender_has_shutdown) != 0)
- || !skb_queue_empty(&sk->sk_receive_queue)
- || sk->sk_err
- || (sk->sk_shutdown & RCV_SHUTDOWN)
- || signal_pending(current)
- || !timeo) {
+ read_data(x, &bytes, bytes_len);
+ if (avail_bytes < bytes + bytes_len)
+ {
+ copied = 0;
break;
}
- timeo = schedule_timeout(timeo); */
- break;
- }
+ /* Perform the read of the actual packet now */
+ /* TODO Need to think how to align packets to pages in
+ order to avoid extra copy*/
+ read_data(x, pkt, bytes);
+ /* Push packet to clickos */
+ x->fn_rx(pkt, bytes, x->fn_rx_arg);
-// finish_wait(sk->sk_sleep, &wait);
+ /* Update values */
+ copied += bytes;
+ d->recv_offset = (recv_offset + bytes) % max_offset;
+ d->total_bytes_received += bytes;
+ atomic_add(bytes, &d->avail_bytes);
+ rx_work_todo &= (~x->sockid);
+ notify_remote_via_evtchn(x->evtchn_local_port);
+ avail_bytes = max_offset - atomic_read(&d->avail_bytes);
+ }
- TRACE_EXIT;
- return timeo;
+ return copied;
}
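The bookkeeping at the bottom of xensock_rx's loop deserves care: recv_offset is the value captured before the loop, read_data always reads from d->recv_offset (so the payload read starts on top of the header), and the offset/avail_bytes updates advance by bytes even though bytes + 4 were consumed. A hedged sketch of a consistent consume step built from the same helpers (not part of the patch):

    read_data(x, &bytes, bytes_len);                    /* 4-byte length header */
    d->recv_offset = (d->recv_offset + bytes_len) % max_offset;
    read_data(x, pkt, bytes);                           /* payload follows the header */
    d->recv_offset = (d->recv_offset + bytes) % max_offset;
    d->total_bytes_received += bytes + bytes_len;
    atomic_add(bytes + bytes_len, &d->avail_bytes);     /* free header + payload */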
static void
-server_interrupt (evtchn_port_t irq, struct pt_regs *regs, void *dev_id) {
-// struct xen_sock *x = dev_id;
+server_interrupt(evtchn_port_t irq, struct pt_regs *regs, void *dev_id)
+{
+ struct xen_sock *x = dev_id;
TRACE_ENTRY;
-
-/* if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
- wake_up_interruptible(sk->sk_sleep);
- } */
+ u_int flags;
+ local_irq_save(flags);
+ rx_work_todo |= x->sockid;
+ wake_up(&rx_queue);
+ local_irq_restore(flags);
TRACE_EXIT;
}
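server_interrupt treats rx_work_todo as a bitmask but or-s in the raw sockid, so socket 0 never sets a bit and, say, sockid 3 aliases sockets 1 and 2 (xensock_rx clears with the same raw value). If sockids stay small integers, a bit-per-socket encoding is the obvious fix (sketch, not part of the patch):

    rx_work_todo |= (1u << x->sockid);    /* in server_interrupt */
    /* ... and in xensock_rx, once the ring is drained: */
    rx_work_todo &= ~(1u << x->sockid);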
-// static int
-// local_memcpy_toiovecend (struct iovec *iov, unsigned char *kdata, int offset, int len) {
-// int err = -EFAULT;
-//
-// /* Skip over the finished iovecs */
-// while (offset >= iov->iov_len) {
-// offset -= iov->iov_len;
-// iov++;
-// }
-//
-// while (len > 0) {
-// u8 *base = iov->iov_base + offset;
-// int copy = min((unsigned int)len, iov->iov_len - offset);
-//
-// offset = 0;
-// if (copy_to_user(base, kdata, copy)) {
-// goto out;
-// }
-// kdata += copy;
-// len -= copy;
-// iov++;
-// }
-// err = 0;
-//
-// out:
-// return err;
-// } */
-
/************************************************************************
* Connection teardown functions (common to both server and client).
************************************************************************/
static int
-xen_release (struct xen_sock *x) {
+xen_release(struct xen_sock *x)
+{
//struct sock *sk = sock->sk;
//struct xen_sock *x;
struct descriptor_page *d;
@@ -905,21 +1063,26 @@ xen_release (struct xen_sock *x) {
if (x->descriptor_handle == -1)
goto out;
- if (x->is_server) {
- while (atomic_read(&d->sender_has_shutdown) == 0 ) {
+ if (x->is_server)
+ {
+ while (atomic_read(&d->sender_has_shutdown) == 0)
+ {
}
server_unallocate_buffer_pages(x);
server_unallocate_descriptor_page(x);
}
- if (x->is_client) {
- if ((atomic_read(&d->sender_has_shutdown)) == 0) {
+ if (x->is_client)
+ {
+ if ((atomic_read(&d->sender_has_shutdown)) == 0)
+ {
client_unmap_buffer_pages(x);
client_unmap_descriptor_page(x);
notify_remote_via_evtchn(x->evtchn_local_port);
}
- else {
+ else
+ {
printk(" xen_release: SENDER ALREADY SHUT DOWN!\n");
}
}
@@ -929,102 +1092,11 @@ out:
return 0;
}
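The server side of xen_release above spins in an empty while loop until the client raises sender_has_shutdown. On a cooperatively scheduled Mini-OS guest a yield inside the loop is kinder to the rest of the system (sketch, assuming Mini-OS's schedule(); not part of the patch):

    while (atomic_read(&d->sender_has_shutdown) == 0)
        schedule();    /* yield the vCPU instead of hard-spinning */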
-static void
-server_unallocate_buffer_pages (struct xen_sock *x) {
- if (x->buffer_grefs) {
- int buffer_num_pages = (1 << x->buffer_order);
- int i;
-
- for (i = 0; i < buffer_num_pages; i++) {
- if (x->buffer_grefs[i] == -ENOSPC) {
- break;
- }
-
- gnttab_end_access(x->buffer_grefs[i]);
- x->buffer_grefs[i] = -ENOSPC;
- }
-
- free(x->buffer_grefs);
- x->buffer_grefs = NULL;
- }
-
- if (x->buffer_addr) {
- struct descriptor_page *d = x->descriptor_addr;
-
- free_pages((void *)x->buffer_addr, x->buffer_order);
- x->buffer_addr = 0;
- x->buffer_order = -1;
- if (d) {
- d->buffer_order = -1;
- }
- }
-}
-
-static void
-server_unallocate_descriptor_page (struct xen_sock *x) {
- if (x->descriptor_gref != -ENOSPC) {
- gnttab_end_access(x->descriptor_gref);
- x->descriptor_gref = -ENOSPC;
- }
- if (x->descriptor_addr) {
- free_page((void *)(x->descriptor_addr));
- x->descriptor_addr = NULL;
- }
-}
-
-static void
-client_unmap_buffer_pages (struct xen_sock *x) {
-
- if (x->buffer_handles) {
- struct descriptor_page *d = x->descriptor_addr;
- int buffer_order = d->buffer_order;
- int buffer_num_pages = (1 << buffer_order);
- int i;
- struct gnttab_unmap_grant_ref op;
- int rc = 0;
-
- for (i = 0; i < buffer_num_pages; i++) {
- if (x->buffer_handles[i] == -1) {
- break;
- }
-
- memset(&op, 0, sizeof(op));
- op.host_addr = x->buffer_addr + i * PAGE_SIZE;
- op.handle = x->buffer_handles[i];
- op.dev_bus_addr = 0;
-
- rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
- if (rc == -ENOSYS) {
- printk("Failure to unmap grant reference \n");
- }
- }
-
- free(x->buffer_handles);
- x->buffer_handles = NULL;
- }
-}
-
-static void
-client_unmap_descriptor_page (struct xen_sock *x) {
- struct descriptor_page *d;
- int rc = 0;
-
- d = x->descriptor_addr;
-
- if (x->descriptor_handle != -1) {
- struct gnttab_unmap_grant_ref op;
-
- memset(&op, 0, sizeof(op));
- op.host_addr = (unsigned long)x->descriptor_addr;
- op.handle = x->descriptor_handle;
- op.dev_bus_addr = 0;
-
- atomic_set(&d->sender_has_shutdown, 1);
- rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
- if (rc == -ENOSYS) {
- printk("Failure to unmap grant reference for descriptor page\n");
- }
-
- x->descriptor_handle = -1;
- }
+void xensock_set_rx_handler(struct xen_sock *dev,
+ void (*thesock_rx)(unsigned char *data, int len, void *arg),
+ void *arg)
+{
+ dev->fn_rx = thesock_rx;
+ dev->fn_rx_arg = arg;
+ return;
}
--
2.7.4 | |