Re: [Xen-devel] [PATCH] add FS backend/frontend
What's the usage scenario? -- Keir On 17/1/08 16:24, "Samuel Thibault" <samuel.thibault@xxxxxxxxxxxxx> wrote: > add FS backend/frontend > > Signed-off-by: Samuel Thibault <samuel.thibault@xxxxxxxxxxxxx> > Signed-off-by: Grzegorz Milos <gm281@xxxxxxxxx> > > # HG changeset patch > # User Samuel Thibault <samuel.thibault@xxxxxxxxxxxxx> > # Date 1200586950 0 > # Node ID b0e2c382ffb2df8485a08daf17a8e0e85436e17d > # Parent c4babfc157d51322e7167a7a84e0462ea3ccedbe > > add FS backend/frontend > > diff -r c4babfc157d5 -r b0e2c382ffb2 extras/mini-os/fs-front.c > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > +++ b/extras/mini-os/fs-front.c Thu Jan 17 16:22:30 2008 +0000 > @@ -0,0 +1,1129 @@ > +/**************************************************************************** > ** > + * fs-front.c > + * > + * Frontend driver for FS split device driver. > + * > + * Copyright (c) 2007, Grzegorz Milos, Sun Microsystems, Inc. > + * > + * Permission is hereby granted, free of charge, to any person obtaining a > copy > + * of this software and associated documentation files (the "Software"), to > + * deal in the Software without restriction, including without limitation the > + * rights to use, copy, modify, merge, publish, distribute, sublicense, > and/or > + * sell copies of the Software, and to permit persons to whom the Software is > + * furnished to do so, subject to the following conditions: > + * > + * The above copyright notice and this permission notice shall be included in > + * all copies or substantial portions of the Software. > + * > + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR > + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, > + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL > THE > + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER > + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING > + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER > + * DEALINGS IN THE SOFTWARE. > + */ > + > +#undef NDEBUG > +#include <os.h> > +#include <list.h> > +#include <xmalloc.h> > +#include <xenbus.h> > +#include <gnttab.h> > +#include <events.h> > +#include <xen/io/fsif.h> > +#include <fs.h> > +#include <sched.h> > + > +#define preempt_disable() > +#define preempt_enable() > +#define cmpxchg(p,o,n) synch_cmpxchg(p,o,n) > + > + > +#ifdef FS_DEBUG > +#define DEBUG(_f, _a...) \ > + printk("MINI_OS(file=fs-front.c, line=%d) " _f "\n", __LINE__, ## _a) > +#else > +#define DEBUG(_f, _a...) ((void)0) > +#endif > + > + > +struct fs_request; > +struct fs_import *fs_import; > + > +/**************************************************************************** > **/ > +/* RING REQUEST/RESPONSES HANDLING > */ > +/**************************************************************************** > **/ > + > +struct fs_request > +{ > + void *page; > + grant_ref_t gref; > + struct thread *thread; /* Thread blocked on this request > */ > + struct fsif_response shadow_rsp; /* Response copy written by the > + interrupt handler */ > +}; > + > +/* Ring operations: > + * FSIF ring is used differently to Linux-like split devices. This stems from > + * the fact that no I/O request queue is present. The use of some of the > macros > + * defined in ring.h is not allowed, in particular: > + * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY cannot be used.
> + * > + * The protocol used for FSIF ring is described below: > + * > + * In order to reserve a request the frontend: > + * a) saves current frontend_ring->req_prod_pvt into a local variable > + * b) checks that there are free requests using the local req_prod_pvt > + * c) tries to reserve the request using cmpxchg on > frontend_ring->req_prod_pvt > + * if cmpxchg fails, it means that someone reserved the request, start > from > + * a) > + * > + * In order to commit a request to the shared ring: > + * a) cmpxchg shared_ring->req_prod from local req_prod_pvt to req_prod_pvt+1 > + * Loop if unsuccessful. > + * NOTE: Request should be committed to the shared ring as quickly as > possible, > + * because otherwise other threads might busy loop trying to commit > next > + * requests. It also follows that preemption should be disabled, if > + * possible, for the duration of the request construction. > + */ > + > +/* Number of free requests (for use on front side only). */ > +#define FS_RING_FREE_REQUESTS(_r, _req_prod_pvt) \ > + (RING_SIZE(_r) - (_req_prod_pvt - (_r)->rsp_cons)) > + > + > + > +static RING_IDX reserve_fsif_request(struct fs_import *import) > +{ > + RING_IDX idx; > + > + down(&import->reqs_sem); > + preempt_disable(); > +again: > + /* We will attempt to reserve slot idx */ > + idx = import->ring.req_prod_pvt; > + ASSERT(FS_RING_FREE_REQUESTS(&import->ring, idx)); > + /* Attempt to reserve */ > + if(cmpxchg(&import->ring.req_prod_pvt, idx, idx+1) != idx) > + goto again; > + > + return idx; > +} > + > +static void commit_fsif_request(struct fs_import *import, RING_IDX idx) > +{ > + while(cmpxchg(&import->ring.sring->req_prod, idx, idx+1) != idx) > + { > + printk("Failed to commit a request: req_prod=%d, idx=%d\n", > + import->ring.sring->req_prod, idx); > + } > + preempt_enable(); > + > + /* NOTE: we cannot do anything clever about rsp_event, to hold off > + * notifications, because we don't know if we are a single request (in > which > + * case we have to notify always), or a part of a larger request group > + * (when, in some cases, notification isn't required) */ > + notify_remote_via_evtchn(import->local_port); > +} > + > + > + > +static inline void add_id_to_freelist(unsigned int id,unsigned short* > freelist) > +{ > + unsigned int old_id, new_id; > + > +again: > + old_id = freelist[0]; > + /* Note: temporal inconsistency, since freelist[0] can be changed by > someone > + * else, but we are a sole owner of freelist[id], it's OK. */ > + freelist[id] = old_id; > + new_id = id; > + if(cmpxchg(&freelist[0], old_id, new_id) != old_id) > + { > + printk("Cmpxchg on freelist add failed.\n"); > + goto again; > + } > +} > + > +/* always call reserve_fsif_request(import) before this, to protect from > + * depletion.
*/ > +static inline unsigned short get_id_from_freelist(unsigned short* freelist) > +{ > + unsigned int old_id, new_id; > + > +again: > + old_id = freelist[0]; > + new_id = freelist[old_id]; > + if(cmpxchg(&freelist[0], old_id, new_id) != old_id) > + { > + printk("Cmpxchg on freelist remove failed.\n"); > + goto again; > + } > + > + return old_id; > +} > + > +/**************************************************************************** > **/ > +/* END OF RING REQUEST/RESPONSES HANDLING > */ > +/**************************************************************************** > **/ > + > + > + > +/**************************************************************************** > **/ > +/* INDIVIDUAL FILE OPERATIONS > */ > +/**************************************************************************** > **/ > +int fs_open(struct fs_import *import, char *file) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + int fd; > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_open call is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + sprintf(fsr->page, "%s", file); > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_FILE_OPEN; > + req->id = priv_req_id; > + req->u.fopen.gref = fsr->gref; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + fd = (int)fsr->shadow_rsp.ret_val; > + DEBUG("The following FD returned: %d\n", fd); > + add_id_to_freelist(priv_req_id, import->freelist); > + > + return fd; > +} > + > +int fs_close(struct fs_import *import, int fd) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + int ret; > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_close call is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_FILE_CLOSE; > + req->id = priv_req_id; > + req->u.fclose.fd = fd; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + ret = (int)fsr->shadow_rsp.ret_val; > + DEBUG("Close returned: %d\n", ret); > + add_id_to_freelist(priv_req_id, import->freelist); > + > + return ret; > +} > + > +ssize_t fs_read(struct fs_import *import, int fd, void *buf, > + ssize_t len, ssize_t offset) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + ssize_t ret; > + > + BUG_ON(len > PAGE_SIZE); > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request
structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_read call is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + memset(fsr->page, 0, PAGE_SIZE); > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_FILE_READ; > + req->id = priv_req_id; > + req->u.fread.fd = fd; > + req->u.fread.gref = fsr->gref; > + req->u.fread.len = len; > + req->u.fread.offset = offset; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + ret = (ssize_t)fsr->shadow_rsp.ret_val; > + DEBUG("The following ret value returned %d\n", ret); > + if(ret > 0) > + memcpy(buf, fsr->page, ret); > + add_id_to_freelist(priv_req_id, import->freelist); > + > + return ret; > +} > + > +ssize_t fs_write(struct fs_import *import, int fd, void *buf, > + ssize_t len, ssize_t offset) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + ssize_t ret; > + > + BUG_ON(len > PAGE_SIZE); > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_write call is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + memcpy(fsr->page, buf, len); > + BUG_ON(len > PAGE_SIZE); > + memset((char *)fsr->page + len, 0, PAGE_SIZE - len); > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_FILE_WRITE; > + req->id = priv_req_id; > + req->u.fwrite.fd = fd; > + req->u.fwrite.gref = fsr->gref; > + req->u.fwrite.len = len; > + req->u.fwrite.offset = offset; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + ret = (ssize_t)fsr->shadow_rsp.ret_val; > + DEBUG("The following ret value returned %d\n", ret); > + add_id_to_freelist(priv_req_id, import->freelist); > + > + return ret; > +} > + > +int fs_stat(struct fs_import *import, > + int fd, > + struct fsif_stat_response *stat) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + int ret; > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_stat call is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + memset(fsr->page, 0, PAGE_SIZE); > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_STAT; > + req->id = priv_req_id; > + req->u.fstat.fd = fd; > + req->u.fstat.gref = fsr->gref; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + ret = (int)fsr->shadow_rsp.ret_val; > + DEBUG("Following ret from fstat: %d\n", ret); > + memcpy(stat, fsr->page, sizeof(struct
fsif_stat_response)); > + add_id_to_freelist(priv_req_id, import->freelist); > + > + return ret; > +} > + > +int fs_truncate(struct fs_import *import, > + int fd, > + int64_t length) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + int ret; > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_truncate call is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_FILE_TRUNCATE; > + req->id = priv_req_id; > + req->u.ftruncate.fd = fd; > + req->u.ftruncate.length = length; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + ret = (int)fsr->shadow_rsp.ret_val; > + DEBUG("Following ret from ftruncate: %d\n", ret); > + add_id_to_freelist(priv_req_id, import->freelist); > + > + return ret; > +} > + > +int fs_remove(struct fs_import *import, char *file) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + int ret; > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_remove call is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + sprintf(fsr->page, "%s", file); > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_REMOVE; > + req->id = priv_req_id; > + req->u.fremove.gref = fsr->gref; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + ret = (int)fsr->shadow_rsp.ret_val; > + DEBUG("The following ret: %d\n", ret); > + add_id_to_freelist(priv_req_id, import->freelist); > + > + return ret; > +} > + > + > +int fs_rename(struct fs_import *import, > + char *old_file_name, > + char *new_file_name) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + int ret; > + char old_header[] = "old: "; > + char new_header[] = "new: "; > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_rename call is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + sprintf(fsr->page, "%s%s%c%s%s", > + old_header, old_file_name, '\0', new_header, new_file_name); > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_RENAME; > + req->id = priv_req_id; > + req->u.frename.gref = fsr->gref; > + req->u.frename.old_name_offset = strlen(old_header); > + req->u.frename.new_name_offset = strlen(old_header) + > +
strlen(old_file_name) + > + strlen(new_header) + > + 1 /* Accounting for the additional > + end of string character */; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + ret = (int)fsr->shadow_rsp.ret_val; > + DEBUG("The following ret: %d\n", ret); > + add_id_to_freelist(priv_req_id, import->freelist); > + > + return ret; > +} > + > +int fs_create(struct fs_import *import, char *name, > + int8_t directory, int32_t mode) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + int ret; > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_create call is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + sprintf(fsr->page, "%s", name); > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_CREATE; > + req->id = priv_req_id; > + req->u.fcreate.gref = fsr->gref; > + req->u.fcreate.directory = directory; > + req->u.fcreate.mode = mode; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + ret = (int)fsr->shadow_rsp.ret_val; > + DEBUG("The following ret: %d\n", ret); > + add_id_to_freelist(priv_req_id, import->freelist); > + > + return ret; > +} > + > +char** fs_list(struct fs_import *import, char *name, > + int32_t offset, int32_t *nr_files, int *has_more) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + char **files, *current_file; > + int i; > + > + DEBUG("Different masks: NRFILES=(%llx, %d), ERROR=(%llx, %d), > HAS_MORE(%llx, %d)\n", > + NR_FILES_MASK, NR_FILES_SHIFT, ERROR_MASK, ERROR_SHIFT, > HAS_MORE_FLAG, HAS_MORE_SHIFT); > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_list call is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + sprintf(fsr->page, "%s", name); > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_DIR_LIST; > + req->id = priv_req_id; > + req->u.flist.gref = fsr->gref; > + req->u.flist.offset = offset; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + *nr_files = (fsr->shadow_rsp.ret_val & NR_FILES_MASK) >> NR_FILES_SHIFT; > + files = NULL; > + if(*nr_files <= 0) goto exit; > + files = malloc(sizeof(char*) * (*nr_files)); > + current_file = fsr->page; > + for(i=0; i<*nr_files; i++) > + { > + files[i] = strdup(current_file); > + current_file += strlen(current_file) + 1; > + } > + if(has_more != NULL) > + *has_more = fsr->shadow_rsp.ret_val & HAS_MORE_FLAG; > + add_id_to_freelist(priv_req_id, import->freelist); > +exit:
> + return files; > +} > + > +int fs_chmod(struct fs_import *import, int fd, int32_t mode) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + int ret; > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_chmod call is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_CHMOD; > + req->id = priv_req_id; > + req->u.fchmod.fd = fd; > + req->u.fchmod.mode = mode; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + ret = (int)fsr->shadow_rsp.ret_val; > + DEBUG("The following returned: %d\n", ret); > + add_id_to_freelist(priv_req_id, import->freelist); > + > + return ret; > +} > + > +int64_t fs_space(struct fs_import *import, char *location) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + int64_t ret; > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_space is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + sprintf(fsr->page, "%s", location); > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_FS_SPACE; > + req->id = priv_req_id; > + req->u.fspace.gref = fsr->gref; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + ret = (int64_t)fsr->shadow_rsp.ret_val; > + DEBUG("The following returned: %lld\n", ret); > + add_id_to_freelist(priv_req_id, import->freelist); > + > + return ret; > +} > + > +int fs_sync(struct fs_import *import, int fd) > +{ > + struct fs_request *fsr; > + unsigned short priv_req_id; > + RING_IDX back_req_id; > + struct fsif_request *req; > + int ret; > + > + /* Prepare request for the backend */ > + back_req_id = reserve_fsif_request(import); > + DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref); > + > + /* Prepare our private request structure */ > + priv_req_id = get_id_from_freelist(import->freelist); > + DEBUG("Request id for fs_sync call is: %d\n", priv_req_id); > + fsr = &import->requests[priv_req_id]; > + fsr->thread = current; > + > + req = RING_GET_REQUEST(&import->ring, back_req_id); > + req->type = REQ_FILE_SYNC; > + req->id = priv_req_id; > + req->u.fsync.fd = fd; > + > + /* Set blocked flag before committing the request, thus avoiding missed > + * response race */ > + block(current); > + commit_fsif_request(import, back_req_id); > + schedule(); > + > + /* Read the response */ > + ret = (int)fsr->shadow_rsp.ret_val; > + DEBUG("Sync returned: %d\n", ret); > + add_id_to_freelist(priv_req_id, import->freelist); > + > + return ret; > +} > + > + > +/**************************************************************************** > **/ > +/* END
OF INDIVIDUAL FILE OPERATIONS > */ > +/**************************************************************************** > / > + > + > +static void fsfront_handler(evtchn_port_t port, struct pt_regs *regs, void > *data) > +{ > + struct fs_import *import = (struct fs_import*)data; > + static int in_irq = 0; > + RING_IDX cons, rp; > + int more; > + > + /* Check for non-reentrance */ > + BUG_ON(in_irq); > + in_irq = 1; > + > + DEBUG("Event from import [%d:%d].\n", import->dom_id, import->export_id); > +moretodo: > + rp = import->ring.sring->req_prod; > + rmb(); /* Ensure we see queued responses up to 'rp'. */ > + cons = import->ring.rsp_cons; > + while (cons != rp) > + { > + struct fsif_response *rsp; > + struct fs_request *req; > + > + rsp = RING_GET_RESPONSE(&import->ring, cons); > + DEBUG("Response at idx=%d to request id=%d, ret_val=%lx\n", > + import->ring.rsp_cons, rsp->id, rsp->ret_val); > + req = &import->requests[rsp->id]; > + memcpy(&req->shadow_rsp, rsp, sizeof(struct fsif_response)); > + DEBUG("Waking up: %s\n", req->thread->name); > + wake(req->thread); > + > + cons++; > + up(&import->reqs_sem); > + } > + > + import->ring.rsp_cons = rp; > + RING_FINAL_CHECK_FOR_RESPONSES(&import->ring, more); > + if(more) goto moretodo; > + > + in_irq = 0; > +} > + > +/* Small utility function to figure out our domain id */ > +static domid_t get_self_id(void) > +{ > + char *dom_id; > + domid_t ret; > + > + BUG_ON(xenbus_read(XBT_NIL, "domid", &dom_id)); > + sscanf(dom_id, "%d", &ret); > + > + return ret; > +} > + > +static void alloc_request_table(struct fs_import *import) > +{ > + struct fs_request *requests; > + int i; > + > + BUG_ON(import->nr_entries <= 0); > + printk("Allocating request array for import %d, nr_entries = %d.\n", > + import->import_id, import->nr_entries); > + requests = xmalloc_array(struct fs_request, import->nr_entries); > + import->freelist = xmalloc_array(unsigned short, import->nr_entries); > + memset(import->freelist, 0, sizeof(unsigned short) * import->nr_entries); > + for(i=0; i<import->nr_entries; i++) > + { > + /* TODO: that's a lot of memory */ > + requests[i].page = (void *)alloc_page(); > + requests[i].gref = gnttab_grant_access(import->dom_id, > + virt_to_mfn(requests[i].page), > + 0); > + //printk(" ===>> Page=%lx, gref=%d, mfn=%lx\n", requests[i].page, > requests[i].gref, virt_to_mfn(requests[i].page)); > + add_id_to_freelist(i, import->freelist); > + } > + import->requests = requests; > +} > + > + > +/**************************************************************************** > **/ > +/* FS TESTS > */ > +/**************************************************************************** > **/ > + > + > +void test_fs_import(void *data) > +{ > + struct fs_import *import = (struct fs_import *)data; > + int ret, fd, i; > + int32_t nr_files; > + char buffer[1024]; > + ssize_t offset; > + char **files; > + long ret64; > + > + /* Sleep for 1s and then try to open a file */ > + sleep(1000); > + ret = fs_create(import, "mini-os-created-directory", 1, 0777); > + printk("Directory create: %d\n", ret); > + > + ret = fs_create(import, "mini-os-created-directory/mini-os-created-file", > 0, 0666); > + printk("File create: %d\n", ret); > + > + fd = fs_open(import, "mini-os-created-directory/mini-os-created-file"); > + printk("File descriptor: %d\n", fd); > + if(fd < 0) return; > + > + offset = 0; > + for(i=0; i<10; i++) > + { > + sprintf(buffer, "Current time is: %lld\n", NOW()); > + ret = fs_write(import, fd, buffer, strlen(buffer), offset); > + printk("Writen current time (%d)\n", 
ret); > + if(ret < 0) > + return; > + offset += ret; > + } > + > + ret = fs_close(import, fd); > + printk("Closed fd: %d, ret=%d\n", fd, ret); > + > + printk("Listing files in /\n"); > + files = fs_list(import, "/", 0, &nr_files, NULL); > + for(i=0; i<nr_files; i++) > + printk(" files[%d] = %s\n", i, files[i]); > + > + ret64 = fs_space(import, "/"); > + printk("Free space: %lld (=%lld Mb)\n", ret64, (ret64 >> 20)); > + > +} > + > +#if 0 > +// char *content = (char *)alloc_page(); > + int fd, ret; > +// int read; > + char write_string[] = "\"test data written from minios\""; > + struct fsif_stat_response stat; > + char **files; > + int32_t nr_files, i; > + int64_t ret64; > + > + > + fd = fs_open(import, "test-export-file"); > +// read = fs_read(import, fd, content, PAGE_SIZE, 0); > +// printk("Read: %d bytes\n", read); > +// content[read] = '\0'; > +// printk("Value: %s\n", content); > + ret = fs_write(import, fd, write_string, strlen(write_string), 0); > + printk("Ret after write: %d\n", ret); > + ret = fs_stat(import, fd, &stat); > + printk("Ret after stat: %d\n", ret); > + printk(" st_mode=%o\n", stat.stat_mode); > + printk(" st_uid =%d\n", stat.stat_uid); > + printk(" st_gid =%d\n", stat.stat_gid); > + printk(" st_size=%ld\n", stat.stat_size); > + printk(" st_atime=%ld\n", stat.stat_atime); > + printk(" st_mtime=%ld\n", stat.stat_mtime); > + printk(" st_ctime=%ld\n", stat.stat_ctime); > + ret = fs_truncate(import, fd, 30); > + printk("Ret after truncate: %d\n", ret); > + ret = fs_remove(import, "test-to-remove/test-file"); > + printk("Ret after remove: %d\n", ret); > + ret = fs_remove(import, "test-to-remove"); > + printk("Ret after remove: %d\n", ret); > + ret = fs_chmod(import, fd, 0700); > + printk("Ret after chmod: %d\n", ret); > + ret = fs_sync(import, fd); > + printk("Ret after sync: %d\n", ret); > + ret = fs_close(import, fd); > + //ret = fs_rename(import, "test-export-file", > "renamed-test-export-file"); > + //printk("Ret after rename: %d\n", ret); > + ret = fs_create(import, "created-dir", 1, 0777); > + printk("Ret after dir create: %d\n", ret); > + ret = fs_create(import, "created-dir/created-file", 0, 0777); > + printk("Ret after file create: %d\n", ret); > + files = fs_list(import, "/", 15, &nr_files, NULL); > + for(i=0; i<nr_files; i++) > + printk(" files[%d] = %s\n", i, files[i]); > + ret64 = fs_space(import, "created-dir"); > + printk("Ret after space: %lld\n", ret64); > + > +#endif > + > + > +/**************************************************************************** > **/ > +/* END OF FS TESTS > > */ > +/**************************************************************************** > **/ > + > +static int init_fs_import(struct fs_import *import) > +{ > + char *err; > + xenbus_transaction_t xbt; > + char nodename[1024], r_nodename[1024], token[128], *message = NULL; > + struct fsif_sring *sring; > + int retry = 0; > + domid_t self_id; > + > + printk("Initialising FS fortend to backend dom %d\n", import->dom_id); > + /* Allocate page for the shared ring */ > + sring = (struct fsif_sring*) alloc_page(); > + memset(sring, 0, PAGE_SIZE); > + > + /* Init the shared ring */ > + SHARED_RING_INIT(sring); > + > + /* Init private frontend ring */ > + FRONT_RING_INIT(&import->ring, sring, PAGE_SIZE); > + import->nr_entries = import->ring.nr_ents; > + > + /* Allocate table of requests */ > + alloc_request_table(import); > + init_SEMAPHORE(&import->reqs_sem, import->nr_entries); > + > + /* Grant access to the shared ring */ > + import->gnt_ref = gnttab_grant_access(import->dom_id, > 
virt_to_mfn(sring), > 0); > + > + /* Allocate event channel */ > + BUG_ON(evtchn_alloc_unbound(import->dom_id, > + fsfront_handler, > + //ANY_CPU, > + import, > + &import->local_port)); > + > + > + self_id = get_self_id(); > + /* Write the frontend info to a node in our Xenbus */ > + sprintf(nodename, "/local/domain/%d/device/vfs/%d", > + self_id, import->import_id); > + > +again: > + err = xenbus_transaction_start(&xbt); > + if (err) { > + printk("starting transaction\n"); > + } > + > + err = xenbus_printf(xbt, > + nodename, > + "ring-ref", > + "%u", > + import->gnt_ref); > + if (err) { > + message = "writing ring-ref"; > + goto abort_transaction; > + } > + > + err = xenbus_printf(xbt, > + nodename, > + "event-channel", > + "%u", > + import->local_port); > + if (err) { > + message = "writing event-channel"; > + goto abort_transaction; > + } > + > + err = xenbus_printf(xbt, nodename, "state", STATE_READY, 0xdeadbeef); > + > + > + err = xenbus_transaction_end(xbt, 0, &retry); > + if (retry) { > + goto again; > + printk("completing transaction\n"); > + } > + > + /* Now, when our node is prepared we write request in the exporting > domain > + * */ > + printk("Our own id is %d\n", self_id); > + sprintf(r_nodename, > + "/local/domain/%d/backend/vfs/exports/requests/%d/%d/frontend", > + import->dom_id, self_id, import->export_id); > + BUG_ON(xenbus_write(XBT_NIL, r_nodename, nodename)); > + > + goto done; > + > +abort_transaction: > + xenbus_transaction_end(xbt, 1, &retry); > + > +done: > + > +#define WAIT_PERIOD 10 /* Wait period in ms */ > +#define MAX_WAIT 10 /* Max number of WAIT_PERIODs */ > + import->backend = NULL; > + sprintf(r_nodename, "%s/backend", nodename); > + > + for(retry = MAX_WAIT; retry > 0; retry--) > + { > + xenbus_read(XBT_NIL, r_nodename, &import->backend); > + if(import->backend) > + { > + printk("Backend found at %s\n", import->backend); > + break; > + } > + sleep(WAIT_PERIOD); > + } > + > + if(!import->backend) > + { > + printk("No backend available.\n"); > + /* TODO - cleanup datastructures/xenbus */ > + return 0; > + } > + sprintf(r_nodename, "%s/state", import->backend); > + sprintf(token, "fs-front-%d", import->import_id); > + /* The token will not be unique if multiple imports are inited */ > + xenbus_watch_path(XBT_NIL, r_nodename/*, token*/); > + xenbus_wait_for_value(/*token,*/ r_nodename, STATE_READY); > + printk("Backend ready.\n"); > + > + //create_thread("fs-tester", test_fs_import, import); > + > + return 1; > +} > + > +static void add_export(struct list_head *exports, unsigned int domid) > +{ > + char node[1024], **exports_list = NULL, *ret_msg; > + int j = 0; > + static int import_id = 0; > + > + sprintf(node, "/local/domain/%d/backend/vfs/exports", domid); > + ret_msg = xenbus_ls(XBT_NIL, node, &exports_list); > + if (ret_msg && strcmp(ret_msg, "ENOENT")) > + printk("couldn't read %s: %s\n", node, ret_msg); > + while(exports_list && exports_list[j]) > + { > + struct fs_import *import; > + int export_id = -1; > + > + sscanf(exports_list[j], "%d", &export_id); > + if(export_id >= 0) > + { > + import = xmalloc(struct fs_import); > + import->dom_id = domid; > + import->export_id = export_id; > + import->import_id = import_id++; > + INIT_LIST_HEAD(&import->list); > + list_add(&import->list, exports); > + } > + free(exports_list[j]); > + j++; > + } > + if(exports_list) > + free(exports_list); > + if(ret_msg) > + free(ret_msg); > +} > + > +#if 0 > +static struct list_head* probe_exports(void) > +{ > + struct list_head *exports; > + char **node_list = NULL, *msg 
= NULL; > + int i = 0; > + > + exports = xmalloc(struct list_head); > + INIT_LIST_HEAD(exports); > + > + msg = xenbus_ls(XBT_NIL, "/local/domain", &node_list); > + if(msg) > + { > + printk("Could not list VFS exports (%s).\n", msg); > + goto exit; > + } > + > + while(node_list[i]) > + { > + add_export(exports, atoi(node_list[i])); > + free(node_list[i]); > + i++; > + } > + > +exit: > + if(msg) > + free(msg); > + if(node_list) > + free(node_list); > + return exports; > +} > +#endif > + > +LIST_HEAD(exports); > + > +void init_fs_frontend(void) > +{ > + struct list_head *entry; > + struct fs_import *import = NULL; > + printk("Initing FS fronend(s).\n"); > + > + //exports = probe_exports(); > + add_export(&exports, 0); > + list_for_each(entry, &exports) > + { > + import = list_entry(entry, struct fs_import, list); > + printk("FS export [dom=%d, id=%d] found\n", > + import->dom_id, import->export_id); > + init_fs_import(import); > + } > + > + fs_import = import; > + > + if (!fs_import) { > + printk("No FS import\n"); > + sleep(1000); > + do_exit(); > + } > +} > diff -r c4babfc157d5 -r b0e2c382ffb2 extras/mini-os/include/fs.h > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > +++ b/extras/mini-os/include/fs.h Thu Jan 17 16:22:30 2008 +0000 > @@ -0,0 +1,51 @@ > +#ifndef __FS_H__ > +#define __FS_H__ > + > +#include <xen/io/fsif.h> > +#include <semaphore.h> > + > +struct fs_import > +{ > + domid_t dom_id; /* dom id of the exporting domain > */ > + u16 export_id; /* export id (exporting dom specific) > */ > + u16 import_id; /* import id (specific to this domain) > */ > + struct list_head list; /* list of all imports > */ > + unsigned int nr_entries; /* Number of entries in rings & request > + array > */ > + struct fsif_front_ring ring; /* frontend ring (contains shared ring) > */ > + int gnt_ref; /* grant reference to the shared ring > */ > + evtchn_port_t local_port; /* local event channel port > */ > + char *backend; /* XenBus location of the backend > */ > + struct fs_request *requests; /* Table of requests > */ > + unsigned short *freelist; /* List of free request ids > */ > + struct semaphore reqs_sem; /* Accounts requests resource > */ > +}; > + > + > +void init_fs_frontend(void); > + > +int fs_open(struct fs_import *import, char *file); > +int fs_close(struct fs_import *import, int fd); > +ssize_t fs_read(struct fs_import *import, int fd, void *buf, > + ssize_t len, ssize_t offset); > +ssize_t fs_write(struct fs_import *import, int fd, void *buf, > + ssize_t len, ssize_t offset); > +int fs_stat(struct fs_import *import, > + int fd, > + struct fsif_stat_response *stat); > +int fs_truncate(struct fs_import *import, > + int fd, > + int64_t length); > +int fs_remove(struct fs_import *import, char *file); > +int fs_rename(struct fs_import *import, > + char *old_file_name, > + char *new_file_name); > +int fs_create(struct fs_import *import, char *name, > + int8_t directory, int32_t mode); > +char** fs_list(struct fs_import *import, char *name, > + int32_t offset, int32_t *nr_files, int *has_more); > +int fs_chmod(struct fs_import *import, int fd, int32_t mode); > +int64_t fs_space(struct fs_import *import, char *location); > +int fs_sync(struct fs_import *import, int fd); > + > +#endif > diff -r c4babfc157d5 -r b0e2c382ffb2 extras/mini-os/include/types.h > --- a/extras/mini-os/include/types.h Thu Jan 17 16:01:24 2008 +0000 > +++ b/extras/mini-os/include/types.h Thu Jan 17 16:22:30 2008 +0000 > @@ -69,4 +69,7 @@ typedef s64 int64_t; > > #define INT_MAX ((int)(~0U>>1)) > #define UINT_MAX (~0U) > + > 
+typedef long ssize_t; > +typedef unsigned long size_t; > #endif /* _TYPES_H_ */ > diff -r c4babfc157d5 -r b0e2c382ffb2 extras/mini-os/kernel.c > --- a/extras/mini-os/kernel.c Thu Jan 17 16:01:24 2008 +0000 > +++ b/extras/mini-os/kernel.c Thu Jan 17 16:22:30 2008 +0000 > @@ -38,6 +38,7 @@ > #include <xenbus.h> > #include <gnttab.h> > #include <netfront.h> > +#include <fs.h> > #include <xen/features.h> > #include <xen/version.h> > > @@ -85,6 +86,11 @@ static void netfront_thread(void *p) > init_netfront(NULL, NULL, NULL); > } > > +static void fs_thread(void *p) > +{ > + init_fs_frontend(); > +} > + > /* This should be overridden by the application we are linked against. */ > __attribute__((weak)) int app_main(start_info_t *si) > { > @@ -92,6 +98,7 @@ __attribute__((weak)) int app_main(start > create_thread("xenbus_tester", xenbus_tester, si); > create_thread("periodic_thread", periodic_thread, si); > create_thread("netfront", netfront_thread, si); > + create_thread("fs-frontend", fs_thread, si); > return 0; > } > > diff -r c4babfc157d5 -r b0e2c382ffb2 tools/Makefile > --- a/tools/Makefile Thu Jan 17 16:01:24 2008 +0000 > +++ b/tools/Makefile Thu Jan 17 16:22:30 2008 +0000 > @@ -27,6 +27,8 @@ SUBDIRS-$(PYTHON_TOOLS) += python > SUBDIRS-$(PYTHON_TOOLS) += python > SUBDIRS-$(PYTHON_TOOLS) += pygrub > endif > + > +SUBDIRS-y += fs-back > > .PHONY: all > all: check > diff -r c4babfc157d5 -r b0e2c382ffb2 tools/fs-back/Makefile > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > +++ b/tools/fs-back/Makefile Thu Jan 17 16:22:30 2008 +0000 > @@ -0,0 +1,40 @@ > +XEN_ROOT = ../.. > +include $(XEN_ROOT)/tools/Rules.mk > + > +INCLUDES += -I.. -I../lib > + > +IBIN = fs-backend > +INST_DIR = /usr/sbin > + > +CFLAGS += -Werror > +CFLAGS += -Wno-unused > +CFLAGS += -fno-strict-aliasing > +CFLAGS += -I $(XEN_LIBXC) > +CFLAGS += $(INCLUDES) -I. -I../xenstore > +CFLAGS += -D_GNU_SOURCE > + > +# Get gcc to generate the dependencies for us. > +CFLAGS += -Wp,-MD,.$(@F).d > +DEPS = .*.d > + > +LIBS := -L. -L.. 
-L../lib > +LIBS += -L$(XEN_LIBXC) > +LIBS += -lxenctrl -lpthread -lrt > +LIBS += -L$(XEN_XENSTORE) -lxenstore > + > +OBJS := fs-xenbus.o fs-ops.o > + > +all: $(IBIN) > + > +fs-backend: $(OBJS) fs-backend.c > + $(CC) $(CFLAGS) -o fs-backend $(OBJS) $(LIBS) fs-backend.c > + > +install: all > + $(INSTALL_PROG) $(IBIN) $(DESTDIR)$(INST_DIR) > + > +clean: > + rm -rf *.o *~ $(DEPS) xen $(IBIN) $(LIB) > + > +.PHONY: clean install > + > +-include $(DEPS) > diff -r c4babfc157d5 -r b0e2c382ffb2 tools/fs-back/fs-backend.c > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > +++ b/tools/fs-back/fs-backend.c Thu Jan 17 16:22:30 2008 +0000 > @@ -0,0 +1,346 @@ > +#undef NDEBUG > +#include <stdio.h> > +#include <string.h> > +#include <assert.h> > +#include <malloc.h> > +#include <pthread.h> > +#include <xenctrl.h> > +#include <aio.h> > +#include <sys/mman.h> > +#include <sys/select.h> > +#include <xen/io/ring.h> > +#include "fs-backend.h" > + > +struct xs_handle *xsh = NULL; > +static struct fs_export *fs_exports = NULL; > +static int export_id = 0; > +static int mount_id = 0; > + > +void dispatch_response(struct mount *mount, int priv_req_id) > +{ > + int i; > + struct fs_op *op; > + struct fs_request *req = &mount->requests[priv_req_id]; > + > + for(i=0;;i++) > + { > + op = fsops[i]; > + /* We should dispatch a response before reaching the end of the > array > */ > + assert(op != NULL); > + if(op->type == req->req_shadow.type) > + { > + printf("Found op for type=%d\n", op->type); > + /* There needs to be a response handler */ > + assert(op->response_handler != NULL); > + op->response_handler(mount, req); > + break; > + } > + } > + > + req->active = 0; > + add_id_to_freelist(priv_req_id, mount->freelist); > +} > + > +static void handle_aio_events(struct mount *mount) > +{ > + int fd, ret, count, i, notify; > + evtchn_port_t port; > + /* AIO control block for the evtchn file destriptor */ > + struct aiocb evtchn_cb; > + const struct aiocb * cb_list[mount->nr_entries]; > + int request_ids[mount->nr_entries]; > + > + /* Prepare the AIO control block for evtchn */ > + fd = xc_evtchn_fd(mount->evth); > + bzero(&evtchn_cb, sizeof(struct aiocb)); > + evtchn_cb.aio_fildes = fd; > + evtchn_cb.aio_nbytes = sizeof(port); > + evtchn_cb.aio_buf = &port; > + assert(aio_read(&evtchn_cb) == 0); > + > +wait_again: > + /* Create list of active AIO requests */ > + count = 0; > + for(i=0; i<mount->nr_entries; i++) > + if(mount->requests[i].active) > + { > + cb_list[count] = &mount->requests[i].aiocb; > + request_ids[count] = i; > + count++; > + } > + /* Add the event channel at the end of the list. Event channel needs to > be > + * handled last as it exits this function. 
*/ > + cb_list[count] = &evtchn_cb; > + request_ids[count] = -1; > + count++; > + > + /* Block till an AIO requset finishes, or we get an event */ > + while(1) { > + int ret = aio_suspend(cb_list, count, NULL); > + if (!ret) > + break; > + assert(errno == EINTR); > + } > + for(i=0; i<count; i++) > + if(aio_error(cb_list[i]) != EINPROGRESS) > + { > + if(request_ids[i] >= 0) > + dispatch_response(mount, request_ids[i]); > + else > + goto read_event_channel; > + } > + > + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify); > + printf("Pushed responces and notify=%d\n", notify); > + if(notify) > + xc_evtchn_notify(mount->evth, mount->local_evtchn); > + > + goto wait_again; > + > +read_event_channel: > + assert(aio_return(&evtchn_cb) == sizeof(evtchn_port_t)); > + assert(xc_evtchn_unmask(mount->evth, mount->local_evtchn) >= 0); > +} > + > + > +void allocate_request_array(struct mount *mount) > +{ > + int i, nr_entries = mount->nr_entries; > + struct fs_request *requests; > + unsigned short *freelist; > + > + requests = malloc(sizeof(struct fs_request) *nr_entries); > + freelist = malloc(sizeof(unsigned short) * nr_entries); > + memset(requests, 0, sizeof(struct fs_request) * nr_entries); > + memset(freelist, 0, sizeof(unsigned short) * nr_entries); > + for(i=0; i< nr_entries; i++) > + { > + requests[i].active = 0; > + add_id_to_freelist(i, freelist); > + } > + mount->requests = requests; > + mount->freelist = freelist; > +} > + > + > +void* handle_mount(void *data) > +{ > + int more, notify; > + struct mount *mount = (struct mount *)data; > + > + printf("Starting a thread for mount: %d\n", mount->mount_id); > + allocate_request_array(mount); > + > + for(;;) > + { > + int nr_consumed=0; > + RING_IDX cons, rp; > + struct fsif_request *req; > + > + handle_aio_events(mount); > +moretodo: > + rp = mount->ring.sring->req_prod; > + rmb(); /* Ensure we see queued requests up to 'rp'. */ > + > + while ((cons = mount->ring.req_cons) != rp) > + { > + int i; > + struct fs_op *op; > + > + printf("Got a request at %d\n", cons); > + req = RING_GET_REQUEST(&mount->ring, cons); > + printf("Request type=%d\n", req->type); > + for(i=0;;i++) > + { > + op = fsops[i]; > + if(op == NULL) > + { > + /* We've reached the end of the array, no appropirate > + * handler found. Warn, ignore and continue. 
*/ > + printf("WARN: Unknown request type: %d\n", req->type); > + mount->ring.req_cons++; > + break; > + } > + if(op->type == req->type) > + { > + /* There needs to be a dispatch handler */ > + assert(op->dispatch_handler != NULL); > + op->dispatch_handler(mount, req); > + break; > + } > + } > + > + nr_consumed++; > + } > + printf("Backend consumed: %d requests\n", nr_consumed); > + RING_FINAL_CHECK_FOR_REQUESTS(&mount->ring, more); > + if(more) goto moretodo; > + > + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify); > + printf("Pushed responces and notify=%d\n", notify); > + if(notify) > + xc_evtchn_notify(mount->evth, mount->local_evtchn); > + } > + > + printf("Destroying thread for mount: %d\n", mount->mount_id); > + xc_gnttab_munmap(mount->gnth, mount->ring.sring, 1); > + xc_gnttab_close(mount->gnth); > + xc_evtchn_unbind(mount->evth, mount->local_evtchn); > + xc_evtchn_close(mount->evth); > + free(mount->frontend); > + pthread_exit(NULL); > +} > + > +static void handle_connection(int frontend_dom_id, int export_id, char > *frontend) > +{ > + struct mount *mount; > + struct fs_export *export; > + int evt_port; > + pthread_t handling_thread; > + struct fsif_sring *sring; > + > + printf("Handling connection from dom=%d, for export=%d\n", > + frontend_dom_id, export_id); > + /* Try to find the export on the list */ > + export = fs_exports; > + while(export) > + { > + if(export->export_id == export_id) > + break; > + export = export->next; > + } > + if(!export) > + { > + printf("Could not find the export (the id is unknown).\n"); > + return; > + } > + > + mount = (struct mount*)malloc(sizeof(struct mount)); > + mount->dom_id = frontend_dom_id; > + mount->export = export; > + mount->mount_id = mount_id++; > + xenbus_read_mount_request(mount, frontend); > + printf("Frontend found at: %s (gref=%d, evtchn=%d)\n", > + mount->frontend, mount->gref, mount->remote_evtchn); > + xenbus_write_backend_node(mount); > + mount->evth = -1; > + mount->evth = xc_evtchn_open(); > + assert(mount->evth != -1); > + mount->local_evtchn = -1; > + mount->local_evtchn = xc_evtchn_bind_interdomain(mount->evth, > + mount->dom_id, > + mount->remote_evtchn); > + assert(mount->local_evtchn != -1); > + mount->gnth = -1; > + mount->gnth = xc_gnttab_open(); > + assert(mount->gnth != -1); > + sring = xc_gnttab_map_grant_ref(mount->gnth, > + mount->dom_id, > + mount->gref, > + PROT_READ | PROT_WRITE); > + BACK_RING_INIT(&mount->ring, sring, PAGE_SIZE); > + mount->nr_entries = mount->ring.nr_ents; > + xenbus_write_backend_ready(mount); > + > + pthread_create(&handling_thread, NULL, &handle_mount, mount); > +} > + > +static void await_connections(void) > +{ > + int fd, ret, dom_id, export_id; > + fd_set fds; > + char **watch_paths; > + unsigned int len; > + char d; > + > + assert(xsh != NULL); > + fd = xenbus_get_watch_fd(); > + /* Infinite watch loop */ > + do { > + FD_ZERO(&fds); > + FD_SET(fd, &fds); > + ret = select(fd+1, &fds, NULL, NULL, NULL); > + assert(ret == 1); > + watch_paths = xs_read_watch(xsh, &len); > + assert(len == 2); > + assert(strcmp(watch_paths[1], "conn-watch") == 0); > + dom_id = -1; > + export_id = -1; > + d = 0; > + printf("Path changed %s\n", watch_paths[0]); > + sscanf(watch_paths[0], WATCH_NODE"/%d/%d/fronten%c", > + &dom_id, &export_id, &d); > + if((dom_id >= 0) && (export_id >= 0) && d == 'd') { > + char *frontend = xs_read(xsh, XBT_NULL, watch_paths[0], NULL); > + if (frontend) { > + handle_connection(dom_id, export_id, frontend); > + xs_rm(xsh, XBT_NULL, watch_paths[0]); > + } > + } > 
+next_select: > + printf("Awaiting next connection.\n"); > + /* TODO - we need to figure out what to free */ > + free(watch_paths); > + } while (1); > +} > + > +struct fs_export* create_export(char *name, char *export_path) > +{ > + struct fs_export *curr_export, **last_export; > + > + /* Create export structure */ > + curr_export = (struct fs_export *)malloc(sizeof(struct fs_export)); > + curr_export->name = name; > + curr_export->export_path = export_path; > + curr_export->export_id = export_id++; > + /* Thread it onto the list */ > + curr_export->next = NULL; > + last_export = &fs_exports; > + while(*last_export) > + last_export = &((*last_export)->next); > + *last_export = curr_export; > + > + return curr_export; > +} > + > + > +int main(void) > +{ > + struct fs_export *export; > + > + /* Open the connection to XenStore first */ > + xsh = xs_domain_open(); > + assert(xsh != NULL); > + xs_rm(xsh, XBT_NULL, ROOT_NODE); > + /* Create watch node */ > + xenbus_create_request_node(); > + > + /* Create & register the default export */ > + export = create_export("default", "/exports"); > + xenbus_register_export(export); > + > + await_connections(); > + /* Close the connection to XenStore when we are finished with everything > */ > + xs_daemon_close(xsh); > +#if 0 > + int xc_handle; > + char *shared_page; > + int prot = PROT_READ | PROT_WRITE; > + > + xc_handle = xc_gnttab_open(); > + printf("Main fn.\n"); > + > + shared_page = xc_gnttab_map_grant_ref(xc_handle, > + 7, > + 2047, > + prot); > + > + shared_page[20] = '\0'; > + printf("Current content of the page = %s\n", shared_page); > + sprintf(shared_page, "%s", "Haha dirty page now! Very bad page."); > + xc_gnttab_munmap(xc_handle, shared_page, 1); > + xc_gnttab_close(xc_handle); > + unrelated next line, saved for later convinience > + xc_evtchn_notify(mount->evth, mount->local_evtchn); > +#endif > +} > diff -r c4babfc157d5 -r b0e2c382ffb2 tools/fs-back/fs-backend.h > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > +++ b/tools/fs-back/fs-backend.h Thu Jan 17 16:22:30 2008 +0000 > @@ -0,0 +1,86 @@ > +#ifndef __LIB_FS_BACKEND__ > +#define __LIB_FS_BACKEND__ > + > +#include <aio.h> > +#include <xs.h> > +#include <xen/grant_table.h> > +#include <xen/event_channel.h> > +#include <xen/io/ring.h> > +#include <xen/io/fsif.h> > + > +#define ROOT_NODE "backend/vfs" > +#define EXPORTS_SUBNODE "exports" > +#define EXPORTS_NODE ROOT_NODE"/"EXPORTS_SUBNODE > +#define WATCH_NODE EXPORTS_NODE"/requests" > + > +struct fs_export > +{ > + int export_id; > + char *export_path; > + char *name; > + struct fs_export *next; > +}; > + > +struct fs_request > +{ > + int active; > + void *page; /* Pointer to mapped grant */ > + struct fsif_request req_shadow; > + struct aiocb aiocb; > +}; > + > + > +struct mount > +{ > + struct fs_export *export; > + int dom_id; > + char *frontend; > + int mount_id; /* = backend id */ > + grant_ref_t gref; > + evtchn_port_t remote_evtchn; > + int evth; /* Handle to the event channel */ > + evtchn_port_t local_evtchn; > + int gnth; > + struct fsif_back_ring ring; > + int nr_entries; > + struct fs_request *requests; > + unsigned short *freelist; > +}; > + > + > +/* Handle to XenStore driver */ > +extern struct xs_handle *xsh; > + > +bool xenbus_create_request_node(void); > +int xenbus_register_export(struct fs_export *export); > +int xenbus_get_watch_fd(void); > +void xenbus_read_mount_request(struct mount *mount, char *frontend); > +void xenbus_write_backend_node(struct mount *mount); > +void xenbus_write_backend_ready(struct mount 
*mount); > + > +/* File operations, implemented in fs-ops.c */ > +struct fs_op > +{ > + int type; /* Type of request (from fsif.h) this handlers > + are responsible for */ > + void (*dispatch_handler)(struct mount *mount, struct fsif_request *req); > + void (*response_handler)(struct mount *mount, struct fs_request *req); > +}; > + > +/* This NULL terminated array of all file requests handlers */ > +extern struct fs_op *fsops[]; > + > +static inline void add_id_to_freelist(unsigned int id,unsigned short* > freelist) > +{ > + freelist[id] = freelist[0]; > + freelist[0] = id; > +} > + > +static inline unsigned short get_id_from_freelist(unsigned short* freelist) > +{ > + unsigned int id = freelist[0]; > + freelist[0] = freelist[id]; > + return id; > +} > + > +#endif /* __LIB_FS_BACKEND__ */ > diff -r c4babfc157d5 -r b0e2c382ffb2 tools/fs-back/fs-ops.c > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > +++ b/tools/fs-back/fs-ops.c Thu Jan 17 16:22:30 2008 +0000 > @@ -0,0 +1,658 @@ > +#undef NDEBUG > +#include <stdio.h> > +#include <aio.h> > +#include <string.h> > +#include <assert.h> > +#include <fcntl.h> > +#include <dirent.h> > +#include <inttypes.h> > +#include <xenctrl.h> > +#include <sys/mman.h> > +#include <sys/types.h> > +#include <sys/stat.h> > +#include <sys/vfs.h> > +#include <sys/mount.h> > +#include <unistd.h> > +#include "fs-backend.h" > + > +/* For debugging only */ > +#include <sys/time.h> > +#include <time.h> > + > + > +#define BUFFER_SIZE 1024 > + > + > +unsigned short get_request(struct mount *mount, struct fsif_request *req) > +{ > + unsigned short id = get_id_from_freelist(mount->freelist); > + > + printf("Private Request id: %d\n", id); > + memcpy(&mount->requests[id].req_shadow, req, sizeof(struct > fsif_request)); > + mount->requests[id].active = 1; > + > + return id; > +} > + > + > +void dispatch_file_open(struct mount *mount, struct fsif_request *req) > +{ > + char *file_name, full_path[BUFFER_SIZE]; > + int fd; > + struct timeval tv1, tv2; > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + uint16_t req_id; > + > + printf("Dispatching file open operation (gref=%d).\n", > req->u.fopen.gref); > + /* Read the request, and open file */ > + file_name = xc_gnttab_map_grant_ref(mount->gnth, > + mount->dom_id, > + req->u.fopen.gref, > + PROT_READ); > + > + req_id = req->id; > + printf("File open issued for %s\n", file_name); > + assert(BUFFER_SIZE > > + strlen(file_name) + strlen(mount->export->export_path) + 1); > + sprintf(full_path, "%s/%s", mount->export->export_path, file_name); > + assert(xc_gnttab_munmap(mount->gnth, file_name, 1) == 0); > + printf("Issuing open for %s\n", full_path); > + fd = open(full_path, O_RDWR); > + printf("Got FD: %d\n", fd); > + /* We can advance the request consumer index, from here on, the request > + * should not be used (it may be overrinden by a response) */ > + mount->ring.req_cons++; > + > + > + /* Get a response from the ring */ > + rsp_idx = mount->ring.rsp_prod_pvt++; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = (uint64_t)fd; > +} > + > +void dispatch_file_close(struct mount *mount, struct fsif_request *req) > +{ > + int ret; > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + uint16_t req_id; > + > + printf("Dispatching file close operation (fd=%d).\n", req->u.fclose.fd); > + > + req_id = req->id; > + ret = close(req->u.fclose.fd); > + printf("Got ret: %d\n", ret); > + /* We can advance the request consumer index, from 
here on, the request > + * should not be used (it may be overrinden by a response) */ > + mount->ring.req_cons++; > + > + > + /* Get a response from the ring */ > + rsp_idx = mount->ring.rsp_prod_pvt++; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = (uint64_t)ret; > +} > +void dispatch_file_read(struct mount *mount, struct fsif_request *req) > +{ > + void *buf; > + int fd; > + uint16_t req_id; > + unsigned short priv_id; > + struct fs_request *priv_req; > + > + /* Read the request */ > + buf = xc_gnttab_map_grant_ref(mount->gnth, > + mount->dom_id, > + req->u.fread.gref, > + PROT_WRITE); > + > + req_id = req->id; > + printf("File read issued for FD=%d (len=%"PRIu64", offest=%"PRIu64")\n", > + req->u.fread.fd, req->u.fread.len, req->u.fread.offset); > + > + priv_id = get_request(mount, req); > + printf("Private id is: %d\n", priv_id); > + priv_req = &mount->requests[priv_id]; > + priv_req->page = buf; > + > + /* Dispatch AIO read request */ > + bzero(&priv_req->aiocb, sizeof(struct aiocb)); > + priv_req->aiocb.aio_fildes = req->u.fread.fd; > + priv_req->aiocb.aio_nbytes = req->u.fread.len; > + priv_req->aiocb.aio_offset = req->u.fread.offset; > + priv_req->aiocb.aio_buf = buf; > + assert(aio_read(&priv_req->aiocb) >= 0); > + > + > + /* We can advance the request consumer index, from here on, the request > + * should not be used (it may be overrinden by a response) */ > + mount->ring.req_cons++; > +} > + > +void end_file_read(struct mount *mount, struct fs_request *priv_req) > +{ > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + uint16_t req_id; > + > + /* Release the grant */ > + assert(xc_gnttab_munmap(mount->gnth, priv_req->page, 1) == 0); > + > + /* Get a response from the ring */ > + rsp_idx = mount->ring.rsp_prod_pvt++; > + req_id = priv_req->req_shadow.id; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = (uint64_t)aio_return(&priv_req->aiocb); > +} > + > +void dispatch_file_write(struct mount *mount, struct fsif_request *req) > +{ > + void *buf; > + int fd; > + uint16_t req_id; > + unsigned short priv_id; > + struct fs_request *priv_req; > + > + /* Read the request */ > + buf = xc_gnttab_map_grant_ref(mount->gnth, > + mount->dom_id, > + req->u.fwrite.gref, > + PROT_READ); > + > + req_id = req->id; > + printf("File write issued for FD=%d (len=%"PRIu64", > offest=%"PRIu64")\n", > + req->u.fwrite.fd, req->u.fwrite.len, req->u.fwrite.offset); > + > + priv_id = get_request(mount, req); > + printf("Private id is: %d\n", priv_id); > + priv_req = &mount->requests[priv_id]; > + priv_req->page = buf; > + > + /* Dispatch AIO write request */ > + bzero(&priv_req->aiocb, sizeof(struct aiocb)); > + priv_req->aiocb.aio_fildes = req->u.fwrite.fd; > + priv_req->aiocb.aio_nbytes = req->u.fwrite.len; > + priv_req->aiocb.aio_offset = req->u.fwrite.offset; > + priv_req->aiocb.aio_buf = buf; > + assert(aio_write(&priv_req->aiocb) >= 0); > + > + > + /* We can advance the request consumer index, from here on, the request > + * should not be used (it may be overrinden by a response) */ > + mount->ring.req_cons++; > +} > + > +void end_file_write(struct mount *mount, struct fs_request *priv_req) > +{ > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + uint16_t req_id; > + > + /* Release the grant */ > + assert(xc_gnttab_munmap(mount->gnth, priv_req->page, 1) == 0); > + > + /* Get a 
response from the ring */ > + rsp_idx = mount->ring.rsp_prod_pvt++; > + req_id = priv_req->req_shadow.id; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = (uint64_t)aio_return(&priv_req->aiocb); > +} > + > +void dispatch_stat(struct mount *mount, struct fsif_request *req) > +{ > + struct fsif_stat_response *buf; > + struct stat stat; > + int fd, ret; > + uint16_t req_id; > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + > + /* Read the request */ > + buf = xc_gnttab_map_grant_ref(mount->gnth, > + mount->dom_id, > + req->u.fstat.gref, > + PROT_WRITE); > + > + req_id = req->id; > + fd = req->u.fstat.fd; > + printf("File stat issued for FD=%d\n", fd); > + > + /* We can advance the request consumer index, from here on, the request > + * should not be used (it may be overridden by a response) */ > + mount->ring.req_cons++; > + > + /* Stat, and create the response */ > + ret = fstat(fd, &stat); > + printf("Mode=%o, uid=%d, a_time=%ld\n", > + stat.st_mode, stat.st_uid, stat.st_atime); > + buf->stat_mode = stat.st_mode; > + buf->stat_uid = stat.st_uid; > + buf->stat_gid = stat.st_gid; > +#ifdef BLKGETSIZE > + if (S_ISBLK(stat.st_mode)) { > + int sectors; > + if (ioctl(fd, BLKGETSIZE, &sectors)) { > + perror("getting device size\n"); > + buf->stat_size = 0; > + } else > + buf->stat_size = sectors << 9; > + } else > +#endif > + buf->stat_size = stat.st_size; > + buf->stat_atime = stat.st_atime; > + buf->stat_mtime = stat.st_mtime; > + buf->stat_ctime = stat.st_ctime; > + > + /* Release the grant */ > + assert(xc_gnttab_munmap(mount->gnth, buf, 1) == 0); > + > + /* Get a response from the ring */ > + rsp_idx = mount->ring.rsp_prod_pvt++; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = (uint64_t)ret; > +} > + > + > +void dispatch_truncate(struct mount *mount, struct fsif_request *req) > +{ > + int fd, ret; > + uint16_t req_id; > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + int64_t length; > + > + req_id = req->id; > + fd = req->u.ftruncate.fd; > + length = req->u.ftruncate.length; > + printf("File truncate issued for FD=%d, length=%"PRId64"\n", fd, > length); > + > + /* We can advance the request consumer index, from here on, the request > + * should not be used (it may be overridden by a response) */ > + mount->ring.req_cons++; > + > + /* Truncate, and create the response */ > + ret = ftruncate(fd, length); > + > + /* Get a response from the ring */ > + rsp_idx = mount->ring.rsp_prod_pvt++; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = (uint64_t)ret; > +} > + > +void dispatch_remove(struct mount *mount, struct fsif_request *req) > +{ > + char *file_name, full_path[BUFFER_SIZE]; > + int ret; > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + uint16_t req_id; > + > + printf("Dispatching remove operation (gref=%d).\n", req->u.fremove.gref); > + /* Read the request, and remove the file */ > + file_name = xc_gnttab_map_grant_ref(mount->gnth, > + mount->dom_id, > + req->u.fremove.gref, > + PROT_READ); > + > + req_id = req->id; > + printf("File remove issued for %s\n", file_name); > + assert(BUFFER_SIZE > > + strlen(file_name) + strlen(mount->export->export_path) + 1); > + sprintf(full_path, "%s/%s", mount->export->export_path, file_name); > + 
assert(xc_gnttab_munmap(mount->gnth, file_name, 1) == 0); > + printf("Issuing remove for %s\n", full_path); > + ret = remove(full_path); > + printf("Got ret: %d\n", ret); > + /* We can advance the request consumer index, from here on, the request > + * should not be used (it may be overrinden by a response) */ > + mount->ring.req_cons++; > + > + > + /* Get a response from the ring */ > + rsp_idx = mount->ring.rsp_prod_pvt++; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = (uint64_t)ret; > +} > + > + > +void dispatch_rename(struct mount *mount, struct fsif_request *req) > +{ > + char *buf, *old_file_name, *new_file_name; > + char old_full_path[BUFFER_SIZE], new_full_path[BUFFER_SIZE]; > + int ret; > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + uint16_t req_id; > + > + printf("Dispatching rename operation (gref=%d).\n", req->u.fremove.gref); > + /* Read the request, and open file */ > + buf = xc_gnttab_map_grant_ref(mount->gnth, > + mount->dom_id, > + req->u.frename.gref, > + PROT_READ); > + > + req_id = req->id; > + old_file_name = buf + req->u.frename.old_name_offset; > + new_file_name = buf + req->u.frename.new_name_offset; > + printf("File rename issued for %s -> %s (buf=%s)\n", > + old_file_name, new_file_name, buf); > + assert(BUFFER_SIZE > > + strlen(old_file_name) + strlen(mount->export->export_path) + 1); > + assert(BUFFER_SIZE > > + strlen(new_file_name) + strlen(mount->export->export_path) + 1); > + sprintf(old_full_path, "%s/%s", mount->export->export_path, > old_file_name); > + sprintf(new_full_path, "%s/%s", mount->export->export_path, > new_file_name); > + assert(xc_gnttab_munmap(mount->gnth, buf, 1) == 0); > + printf("Issuing rename for %s -> %s\n", old_full_path, new_full_path); > + ret = rename(old_full_path, new_full_path); > + printf("Got ret: %d\n", ret); > + /* We can advance the request consumer index, from here on, the request > + * should not be used (it may be overrinden by a response) */ > + mount->ring.req_cons++; > + > + > + /* Get a response from the ring */ > + rsp_idx = mount->ring.rsp_prod_pvt++; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = (uint64_t)ret; > +} > + > + > +void dispatch_create(struct mount *mount, struct fsif_request *req) > +{ > + char *file_name, full_path[BUFFER_SIZE]; > + int ret; > + int8_t directory; > + int32_t mode; > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + uint16_t req_id; > + > + printf("Dispatching file create operation (gref=%d).\n", > req->u.fcreate.gref); > + /* Read the request, and create file/directory */ > + mode = req->u.fcreate.mode; > + directory = req->u.fcreate.directory; > + file_name = xc_gnttab_map_grant_ref(mount->gnth, > + mount->dom_id, > + req->u.fcreate.gref, > + PROT_READ); > + > + req_id = req->id; > + printf("File create issued for %s\n", file_name); > + assert(BUFFER_SIZE > > + strlen(file_name) + strlen(mount->export->export_path) + 1); > + sprintf(full_path, "%s/%s", mount->export->export_path, file_name); > + assert(xc_gnttab_munmap(mount->gnth, file_name, 1) == 0); > + /* We can advance the request consumer index, from here on, the request > + * should not be used (it may be overrinden by a response) */ > + mount->ring.req_cons++; > + > + if(directory) > + { > + printf("Issuing create for directory: %s\n", full_path); > + ret = mkdir(full_path, mode); > + } > + 
else > + { > + printf("Issuing create for file: %s\n", full_path); > + ret = creat(full_path, mode); > + } > + printf("Got ret %d (errno=%d)\n", ret, errno); > + > + /* Get a response from the ring */ > + rsp_idx = mount->ring.rsp_prod_pvt++; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = (uint64_t)ret; > +} > + > +void dispatch_list(struct mount *mount, struct fsif_request *req) > +{ > + char *file_name, *buf, full_path[BUFFER_SIZE]; > + uint32_t offset, nr_files, error_code; > + uint64_t ret_val; > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + uint16_t req_id; > + DIR *dir; > + struct dirent *dirent = NULL; > + > + printf("Dispatching list operation (gref=%d).\n", req->u.flist.gref); > + /* Read the request, and list directory */ > + offset = req->u.flist.offset; > + buf = file_name = xc_gnttab_map_grant_ref(mount->gnth, > + mount->dom_id, > + req->u.flist.gref, > + PROT_READ | PROT_WRITE); > + > + req_id = req->id; > + printf("Dir list issued for %s\n", file_name); > + assert(BUFFER_SIZE > > + strlen(file_name) + strlen(mount->export->export_path) + 1); > + sprintf(full_path, "%s/%s", mount->export->export_path, file_name); > + /* We can advance the request consumer index, from here on, the request > + * should not be used (it may be overrinden by a response) */ > + mount->ring.req_cons++; > + > + ret_val = 0; > + nr_files = 0; > + dir = opendir(full_path); > + if(dir == NULL) > + { > + error_code = errno; > + goto error_out; > + } > + /* Skip offset dirs */ > + dirent = readdir(dir); > + while(offset-- > 0 && dirent != NULL) > + dirent = readdir(dir); > + /* If there was any error with reading the directory, errno will be set > */ > + error_code = errno; > + /* Copy file names of the remaining non-NULL dirents into buf */ > + assert(NAME_MAX < PAGE_SIZE >> 1); > + while(dirent != NULL && > + (PAGE_SIZE - ((unsigned long)buf & PAGE_MASK) > NAME_MAX)) > + { > + int curr_length = strlen(dirent->d_name) + 1; > + > + memcpy(buf, dirent->d_name, curr_length); > + buf += curr_length; > + dirent = readdir(dir); > + error_code = errno; > + nr_files++; > + } > +error_out: > + ret_val = ((nr_files << NR_FILES_SHIFT) & NR_FILES_MASK) | > + ((error_code << ERROR_SHIFT) & ERROR_MASK) | > + (dirent != NULL ? 
HAS_MORE_FLAG : 0); > + assert(xc_gnttab_munmap(mount->gnth, file_name, 1) == 0); > + > + /* Get a response from the ring */ > + rsp_idx = mount->ring.rsp_prod_pvt++; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = ret_val; > +} > + > +void dispatch_chmod(struct mount *mount, struct fsif_request *req) > +{ > + int fd, ret; > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + uint16_t req_id; > + int32_t mode; > + > + printf("Dispatching file chmod operation (fd=%d, mode=%o).\n", > + req->u.fchmod.fd, req->u.fchmod.mode); > + req_id = req->id; > + fd = req->u.fchmod.fd; > + mode = req->u.fchmod.mode; > + /* We can advance the request consumer index, from here on, the request > + * should not be used (it may be overrinden by a response) */ > + mount->ring.req_cons++; > + > + ret = fchmod(fd, mode); > + > + /* Get a response from the ring */ > + rsp_idx = mount->ring.rsp_prod_pvt++; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = (uint64_t)ret; > +} > + > +void dispatch_fs_space(struct mount *mount, struct fsif_request *req) > +{ > + char *file_name, full_path[BUFFER_SIZE]; > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + uint16_t req_id; > + struct statfs stat; > + int64_t ret; > + > + printf("Dispatching fs space operation (gref=%d).\n", > req->u.fspace.gref); > + /* Read the request, and open file */ > + file_name = xc_gnttab_map_grant_ref(mount->gnth, > + mount->dom_id, > + req->u.fspace.gref, > + PROT_READ); > + > + req_id = req->id; > + printf("Fs space issued for %s\n", file_name); > + assert(BUFFER_SIZE > > + strlen(file_name) + strlen(mount->export->export_path) + 1); > + sprintf(full_path, "%s/%s", mount->export->export_path, file_name); > + assert(xc_gnttab_munmap(mount->gnth, file_name, 1) == 0); > + printf("Issuing fs space for %s\n", full_path); > + ret = statfs(full_path, &stat); > + if(ret >= 0) > + ret = stat.f_bsize * stat.f_bfree; > + > + /* We can advance the request consumer index, from here on, the request > + * should not be used (it may be overrinden by a response) */ > + mount->ring.req_cons++; > + > + > + /* Get a response from the ring */ > + rsp_idx = mount->ring.rsp_prod_pvt++; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = (uint64_t)ret; > +} > + > +void dispatch_file_sync(struct mount *mount, struct fsif_request *req) > +{ > + int fd; > + uint16_t req_id; > + unsigned short priv_id; > + struct fs_request *priv_req; > + > + req_id = req->id; > + fd = req->u.fsync.fd; > + printf("File sync issued for FD=%d\n", fd); > + > + priv_id = get_request(mount, req); > + printf("Private id is: %d\n", priv_id); > + priv_req = &mount->requests[priv_id]; > + > + /* Dispatch AIO read request */ > + bzero(&priv_req->aiocb, sizeof(struct aiocb)); > + priv_req->aiocb.aio_fildes = fd; > + assert(aio_fsync(O_SYNC, &priv_req->aiocb) >= 0); > + > + > + /* We can advance the request consumer index, from here on, the request > + * should not be used (it may be overrinden by a response) */ > + mount->ring.req_cons++; > +} > + > +void end_file_sync(struct mount *mount, struct fs_request *priv_req) > +{ > + RING_IDX rsp_idx; > + fsif_response_t *rsp; > + uint16_t req_id; > + > + /* Get a response from the ring */ > + rsp_idx = 
mount->ring.rsp_prod_pvt++; > + req_id = priv_req->req_shadow.id; > + printf("Writing response at: idx=%d, id=%d\n", rsp_idx, req_id); > + rsp = RING_GET_RESPONSE(&mount->ring, rsp_idx); > + rsp->id = req_id; > + rsp->ret_val = (uint64_t)aio_return(&priv_req->aiocb); > +} > + > +struct fs_op fopen_op = {.type = REQ_FILE_OPEN, > + .dispatch_handler = dispatch_file_open, > + .response_handler = NULL}; > +struct fs_op fclose_op = {.type = REQ_FILE_CLOSE, > + .dispatch_handler = dispatch_file_close, > + .response_handler = NULL}; > +struct fs_op fread_op = {.type = REQ_FILE_READ, > + .dispatch_handler = dispatch_file_read, > + .response_handler = end_file_read}; > +struct fs_op fwrite_op = {.type = REQ_FILE_WRITE, > + .dispatch_handler = dispatch_file_write, > + .response_handler = end_file_write}; > +struct fs_op fstat_op = {.type = REQ_STAT, > + .dispatch_handler = dispatch_stat, > + .response_handler = NULL}; > +struct fs_op ftruncate_op = {.type = REQ_FILE_TRUNCATE, > + .dispatch_handler = dispatch_truncate, > + .response_handler = NULL}; > +struct fs_op fremove_op = {.type = REQ_REMOVE, > + .dispatch_handler = dispatch_remove, > + .response_handler = NULL}; > +struct fs_op frename_op = {.type = REQ_RENAME, > + .dispatch_handler = dispatch_rename, > + .response_handler = NULL}; > +struct fs_op fcreate_op = {.type = REQ_CREATE, > + .dispatch_handler = dispatch_create, > + .response_handler = NULL}; > +struct fs_op flist_op = {.type = REQ_DIR_LIST, > + .dispatch_handler = dispatch_list, > + .response_handler = NULL}; > +struct fs_op fchmod_op = {.type = REQ_CHMOD, > + .dispatch_handler = dispatch_chmod, > + .response_handler = NULL}; > +struct fs_op fspace_op = {.type = REQ_FS_SPACE, > + .dispatch_handler = dispatch_fs_space, > + .response_handler = NULL}; > +struct fs_op fsync_op = {.type = REQ_FILE_SYNC, > + .dispatch_handler = dispatch_file_sync, > + .response_handler = end_file_sync}; > + > + > +struct fs_op *fsops[] = {&fopen_op, > + &fclose_op, > + &fread_op, > + &fwrite_op, > + &fstat_op, > + &ftruncate_op, > + &fremove_op, > + &frename_op, > + &fcreate_op, > + &flist_op, > + &fchmod_op, > + &fspace_op, > + &fsync_op, > + NULL}; > diff -r c4babfc157d5 -r b0e2c382ffb2 tools/fs-back/fs-xenbus.c > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > +++ b/tools/fs-back/fs-xenbus.c Thu Jan 17 16:22:30 2008 +0000 > @@ -0,0 +1,180 @@ > +#undef NDEBUG > +#include <stdio.h> > +#include <stdlib.h> > +#include <stdarg.h> > +#include <string.h> > +#include <assert.h> > +#include <xenctrl.h> > +#include <xs.h> > +#include <xen/io/fsif.h> > +#include "fs-backend.h" > + > + > +static bool xenbus_printf(struct xs_handle *xsh, > + xs_transaction_t xbt, > + char* node, > + char* path, > + char* fmt, > + ...) 
> +{ > + char fullpath[1024]; > + char val[1024]; > + va_list args; > + > + va_start(args, fmt); > + sprintf(fullpath,"%s/%s", node, path); > + vsprintf(val, fmt, args); > + va_end(args); > + printf("xenbus_printf (%s) <= %s.\n", fullpath, val); > + > + return xs_write(xsh, xbt, fullpath, val, strlen(val)); > +} > + > +bool xenbus_create_request_node(void) > +{ > + bool ret; > + struct xs_permissions perms; > + > + assert(xsh != NULL); > + xs_rm(xsh, XBT_NULL, WATCH_NODE); > + ret = xs_mkdir(xsh, XBT_NULL, WATCH_NODE); > + if (!ret) > + return false; > + > + perms.id = 0; > + perms.perms = XS_PERM_WRITE; > + ret = xs_set_permissions(xsh, XBT_NULL, WATCH_NODE, &perms, 1); > + > + return ret; > +} > + > +int xenbus_register_export(struct fs_export *export) > +{ > + xs_transaction_t xst = 0; > + char node[1024]; > + struct xs_permissions perms; > + > + assert(xsh != NULL); > + if(xsh == NULL) > + { > + printf("Could not open connection to xenbus deamon.\n"); > + goto error_exit; > + } > + printf("Connection to the xenbus deamon opened successfully.\n"); > + > + /* Start transaction */ > + xst = xs_transaction_start(xsh); > + if(xst == 0) > + { > + printf("Could not start a transaction.\n"); > + goto error_exit; > + } > + printf("XS transaction is %d\n", xst); > + > + /* Create node string */ > + sprintf(node, "%s/%d", EXPORTS_NODE, export->export_id); > + /* Remove old export (if exists) */ > + xs_rm(xsh, xst, node); > + > + if(!xenbus_printf(xsh, xst, node, "name", "%s", export->name)) > + { > + printf("Could not write the export node.\n"); > + goto error_exit; > + } > + > + /* People need to be able to read our export */ > + perms.id = 0; > + perms.perms = XS_PERM_READ; > + if(!xs_set_permissions(xsh, xst, EXPORTS_NODE, &perms, 1)) > + { > + printf("Could not set permissions on the export node.\n"); > + goto error_exit; > + } > + > + xs_transaction_end(xsh, xst, 0); > + return 0; > + > +error_exit: > + if(xst != 0) > + xs_transaction_end(xsh, xst, 1); > + return -1; > +} > + > +int xenbus_get_watch_fd(void) > +{ > + int res; > + assert(xsh != NULL); > + res = xs_watch(xsh, WATCH_NODE, "conn-watch"); > + assert(res); > + return xs_fileno(xsh); > +} > + > +void xenbus_read_mount_request(struct mount *mount, char *frontend) > +{ > + char node[1024]; > + char *s; > + > + assert(xsh != NULL); > +#if 0 > + sprintf(node, WATCH_NODE"/%d/%d/frontend", > + mount->dom_id, mount->export->export_id); > + frontend = xs_read(xsh, XBT_NULL, node, NULL); > +#endif > + mount->frontend = frontend; > + sprintf(node, "%s/state", frontend); > + s = xs_read(xsh, XBT_NULL, node, NULL); > + assert(strcmp(s, STATE_READY) == 0); > + free(s); > + sprintf(node, "%s/ring-ref", frontend); > + s = xs_read(xsh, XBT_NULL, node, NULL); > + mount->gref = atoi(s); > + free(s); > + sprintf(node, "%s/event-channel", frontend); > + s = xs_read(xsh, XBT_NULL, node, NULL); > + mount->remote_evtchn = atoi(s); > + free(s); > +} > + > +/* Small utility function to figure out our domain id */ > +static int get_self_id(void) > +{ > + char *dom_id; > + int ret; > + > + assert(xsh != NULL); > + dom_id = xs_read(xsh, XBT_NULL, "domid", NULL); > + sscanf(dom_id, "%d", &ret); > + free(dom_id); > + > + return ret; > +} > + > + > +void xenbus_write_backend_node(struct mount *mount) > +{ > + char node[1024], backend_node[1024]; > + int self_id; > + > + assert(xsh != NULL); > + self_id = get_self_id(); > + printf("Our own dom_id=%d\n", self_id); > + sprintf(node, "%s/backend", mount->frontend); > + sprintf(backend_node, 
"/local/domain/%d/"ROOT_NODE"/%d", > + self_id, mount->mount_id); > + xs_write(xsh, XBT_NULL, node, backend_node, strlen(backend_node)); > + > + sprintf(node, ROOT_NODE"/%d/state", mount->mount_id); > + xs_write(xsh, XBT_NULL, node, STATE_INITIALISED, > strlen(STATE_INITIALISED)); > +} > + > +void xenbus_write_backend_ready(struct mount *mount) > +{ > + char node[1024]; > + int self_id; > + > + assert(xsh != NULL); > + self_id = get_self_id(); > + sprintf(node, ROOT_NODE"/%d/state", mount->mount_id); > + xs_write(xsh, XBT_NULL, node, STATE_READY, strlen(STATE_READY)); > +} > + > diff -r c4babfc157d5 -r b0e2c382ffb2 xen/include/public/io/fsif.h > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > +++ b/xen/include/public/io/fsif.h Thu Jan 17 16:22:30 2008 +0000 > @@ -0,0 +1,181 @@ > +/**************************************************************************** > ** > + * fsif.h > + * > + * Interface to FS level split device drivers. > + * > + * Permission is hereby granted, free of charge, to any person obtaining a > copy > + * of this software and associated documentation files (the "Software"), to > + * deal in the Software without restriction, including without limitation the > + * rights to use, copy, modify, merge, publish, distribute, sublicense, > and/or > + * sell copies of the Software, and to permit persons to whom the Software is > + * furnished to do so, subject to the following conditions: > + * > + * The above copyright notice and this permission notice shall be included in > + * all copies or substantial portions of the Software. > + * > + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR > + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, > + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL > THE > + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER > + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING > + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER > + * DEALINGS IN THE SOFTWARE. > + * > + * Copyright (c) 2007, Grzegorz Milos, Sun Microsystems, Inc. > + */ > + > +#ifndef __XEN_PUBLIC_IO_FSIF_H__ > +#define __XEN_PUBLIC_IO_FSIF_H__ > + > +#include "ring.h" > +#include "../grant_table.h" > + > +#define REQ_FILE_OPEN 1 > +#define REQ_FILE_CLOSE 2 > +#define REQ_FILE_READ 3 > +#define REQ_FILE_WRITE 4 > +#define REQ_STAT 5 > +#define REQ_FILE_TRUNCATE 6 > +#define REQ_REMOVE 7 > +#define REQ_RENAME 8 > +#define REQ_CREATE 9 > +#define REQ_DIR_LIST 10 > +#define REQ_CHMOD 11 > +#define REQ_FS_SPACE 12 > +#define REQ_FILE_SYNC 13 > + > +struct fsif_open_request { > + grant_ref_t gref; > +}; > + > +struct fsif_close_request { > + uint32_t fd; > +}; > + > +struct fsif_read_request { > + uint32_t fd; > + grant_ref_t gref; > + uint64_t len; > + uint64_t offset; > +}; > + > +struct fsif_write_request { > + uint32_t fd; > + grant_ref_t gref; > + uint64_t len; > + uint64_t offset; > +}; > + > +struct fsif_stat_request { > + uint32_t fd; > + grant_ref_t gref; > +}; > + > +/* This structure is a copy of some fields from stat structure, writen to the > + * granted page. 
*/ > +struct fsif_stat_response { > + int32_t stat_mode; > + uint32_t stat_uid; > + uint32_t stat_gid; > + int32_t pad; > + int64_t stat_size; > + int64_t stat_atime; > + int64_t stat_mtime; > + int64_t stat_ctime; > +}; > + > +struct fsif_truncate_request { > + uint32_t fd; > + int32_t pad; > + int64_t length; > +}; > + > +struct fsif_remove_request { > + grant_ref_t gref; > +}; > + > +struct fsif_rename_request { > + uint16_t old_name_offset; > + uint16_t new_name_offset; > + grant_ref_t gref; > +}; > + > +struct fsif_create_request { > + int8_t directory; > + int8_t pad; > + int16_t pad2; > + int32_t mode; > + grant_ref_t gref; > +}; > + > +struct fsif_list_request { > + uint32_t offset; > + grant_ref_t gref; > +}; > + > +#define NR_FILES_SHIFT 0 > +#define NR_FILES_SIZE 16 /* 16 bits for the number of files mask */ > +#define NR_FILES_MASK (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT) > +#define ERROR_SIZE 32 /* 32 bits for the error mask */ > +#define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT) > +#define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT) > +#define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE) > +#define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT) > + > +struct fsif_chmod_request { > + uint32_t fd; > + int32_t mode; > +}; > + > +struct fsif_space_request { > + grant_ref_t gref; > +}; > + > +struct fsif_sync_request { > + uint32_t fd; > +}; > + > + > +/* FS operation request */ > +struct fsif_request { > + uint8_t type; /* Type of the request */ > + uint8_t pad; > + uint16_t id; /* Request ID, copied to the response */ > + uint32_t pad2; > + union { > + struct fsif_open_request fopen; > + struct fsif_close_request fclose; > + struct fsif_read_request fread; > + struct fsif_write_request fwrite; > + struct fsif_stat_request fstat; > + struct fsif_truncate_request ftruncate; > + struct fsif_remove_request fremove; > + struct fsif_rename_request frename; > + struct fsif_create_request fcreate; > + struct fsif_list_request flist; > + struct fsif_chmod_request fchmod; > + struct fsif_space_request fspace; > + struct fsif_sync_request fsync; > + } u; > +}; > +typedef struct fsif_request fsif_request_t; > + > +/* FS operation response */ > +struct fsif_response { > + uint16_t id; > + uint16_t pad1; > + uint32_t pad2; > + uint64_t ret_val; > +}; > + > +typedef struct fsif_response fsif_response_t; > + > + > +DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response); > + > +#define STATE_INITIALISED "init" > +#define STATE_READY "ready" > + > + > + > +#endif > > _______________________________________________ > Xen-devel mailing list > Xen-devel@xxxxxxxxxxxxxxxxxxx > http://lists.xensource.com/xen-devel _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-devel
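As an illustration of the REQ_DIR_LIST encoding defined in fsif.h above: the backend packs the number of directory entries, the errno value, and a "has more" flag into the single 64-bit ret_val of the response. A frontend could unpack it roughly as follows. This is a minimal standalone sketch, not part of the patch; the mask definitions are copied from fsif.h and the function name is hypothetical.

    /* Sketch only: unpack a REQ_DIR_LIST response word.
     * Mask definitions copied from fsif.h above. */
    #include <stdint.h>
    #include <stdio.h>

    #define NR_FILES_SHIFT  0
    #define NR_FILES_SIZE   16
    #define NR_FILES_MASK   (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT)
    #define ERROR_SIZE      32
    #define ERROR_SHIFT     (NR_FILES_SIZE + NR_FILES_SHIFT)
    #define ERROR_MASK      (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT)
    #define HAS_MORE_SHIFT  (ERROR_SHIFT + ERROR_SIZE)
    #define HAS_MORE_FLAG   (1ULL << HAS_MORE_SHIFT)

    /* Hypothetical helper: decode the ret_val of a REQ_DIR_LIST response. */
    static void decode_list_response(uint64_t ret_val)
    {
        uint32_t nr_files = (uint32_t)((ret_val & NR_FILES_MASK) >> NR_FILES_SHIFT);
        uint32_t error    = (uint32_t)((ret_val & ERROR_MASK) >> ERROR_SHIFT);
        int has_more      = (ret_val & HAS_MORE_FLAG) != 0;

        /* nr_files file names were copied into the granted page back to back,
         * each NUL-terminated, by dispatch_list(). */
        printf("%u entries, errno=%u, more=%d\n", nr_files, error, has_more);
    }

The "has more" flag exists because dispatch_list() stops copying names once fewer than NAME_MAX bytes remain in the granted page, so a frontend listing a large directory would re-issue the request with the offset advanced by the entries already received, until the flag is clear.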