[Xen-changelog] Fix some newline ugliness that BK wouldn't correct.
# HG changeset patch # User akw27@xxxxxxxxxxxxxxxxxxxxxx # Node ID 99ff7c3435b2145fb6ef4bb8e340a25a762e0afd # Parent fc4e10d0455a83c57d6667dda4442198322f5e67 Fix some newline ugliness that BK wouldn't correct. Signed-off-by: akw27@xxxxxxxxxxxx diff -r fc4e10d0455a -r 99ff7c3435b2 tools/blktap/block-async.h --- a/tools/blktap/block-async.h Sat Jul 2 22:37:55 2005 +++ b/tools/blktap/block-async.h Sun Jul 3 12:02:01 2005 @@ -1,69 +1,69 @@ -/* block-async.h - * - * Asynchronous block wrappers for parallax. - */ - -#ifndef _BLOCKASYNC_H_ -#define _BLOCKASYNC_H_ - -#include <assert.h> -#include <xc.h> -#include "vdi.h" - -struct io_ret -{ +/* block-async.h + * + * Asynchronous block wrappers for parallax. + */ + +#ifndef _BLOCKASYNC_H_ +#define _BLOCKASYNC_H_ + +#include <assert.h> +#include <xc.h> +#include "vdi.h" + +struct io_ret +{ enum {IO_ADDR_T, IO_BLOCK_T, IO_INT_T} type; union { u64 a; char *b; int i; } u; -}; - -typedef void (*io_cb_t)(struct io_ret r, void *param); - -/* per-vdi lock structures to make sure requests run in a safe order. */ -struct radix_wait { +}; + +typedef void (*io_cb_t)(struct io_ret r, void *param); + +/* per-vdi lock structures to make sure requests run in a safe order. */ +struct radix_wait { enum {RLOCK, WLOCK} type; io_cb_t cb; void *param; struct radix_wait *next; -}; - -struct radix_lock { +}; + +struct radix_lock { pthread_mutex_t lock; int lines[1024]; struct radix_wait *waiters[1024]; enum {ANY, READ, STOP} state[1024]; -}; -void radix_lock_init(struct radix_lock *r); - -void block_read(u64 addr, io_cb_t cb, void *param); -void block_write(u64 addr, char *block, io_cb_t cb, void *param); -void block_alloc(char *block, io_cb_t cb, void *param); -void block_rlock(struct radix_lock *r, int row, io_cb_t cb, void *param); -void block_wlock(struct radix_lock *r, int row, io_cb_t cb, void *param); -void block_runlock(struct radix_lock *r, int row, io_cb_t cb, void *param); -void block_wunlock(struct radix_lock *r, int row, io_cb_t cb, void *param); -void init_block_async(void); - -static inline u64 IO_ADDR(struct io_ret r) -{ +}; +void radix_lock_init(struct radix_lock *r); + +void block_read(u64 addr, io_cb_t cb, void *param); +void block_write(u64 addr, char *block, io_cb_t cb, void *param); +void block_alloc(char *block, io_cb_t cb, void *param); +void block_rlock(struct radix_lock *r, int row, io_cb_t cb, void *param); +void block_wlock(struct radix_lock *r, int row, io_cb_t cb, void *param); +void block_runlock(struct radix_lock *r, int row, io_cb_t cb, void *param); +void block_wunlock(struct radix_lock *r, int row, io_cb_t cb, void *param); +void init_block_async(void); + +static inline u64 IO_ADDR(struct io_ret r) +{ assert(r.type == IO_ADDR_T); return r.u.a; -} - -static inline char *IO_BLOCK(struct io_ret r) -{ +} + +static inline char *IO_BLOCK(struct io_ret r) +{ assert(r.type == IO_BLOCK_T); return r.u.b; -} - -static inline int IO_INT(struct io_ret r) -{ +} + +static inline int IO_INT(struct io_ret r) +{ assert(r.type == IO_INT_T); return r.u.i; -} - - -#endif //_BLOCKASYNC_H_ +} + + +#endif //_BLOCKASYNC_H_ diff -r fc4e10d0455a -r 99ff7c3435b2 tools/blktap/block-async.c --- a/tools/blktap/block-async.c Sat Jul 2 22:37:55 2005 +++ b/tools/blktap/block-async.c Sun Jul 3 12:02:01 2005 @@ -1,49 +1,49 @@ -/* block-async.c - * - * Asynchronous block wrappers for parallax. 
- */ - - -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <pthread.h> -#include "block-async.h" -#include "blockstore.h" -#include "vdi.h" - - -#if 0 -#define DPRINTF(_f, _a...) printf ( _f , ## _a ) -#else -#define DPRINTF(_f, _a...) ((void)0) -#endif - -/* We have a queue of outstanding I/O requests implemented as a - * circular producer-consumer ring with free-running buffers. - * to allow reordering, this ring indirects to indexes in an - * ring of io_structs. - * - * the block_* calls may either add an entry to this ring and return, - * or satisfy the request immediately and call the callback directly. - * None of the io calls in parallax should be nested enough to worry - * about stack problems with this approach. - */ - -struct read_args { +/* block-async.c + * + * Asynchronous block wrappers for parallax. + */ + + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <pthread.h> +#include "block-async.h" +#include "blockstore.h" +#include "vdi.h" + + +#if 0 +#define DPRINTF(_f, _a...) printf ( _f , ## _a ) +#else +#define DPRINTF(_f, _a...) ((void)0) +#endif + +/* We have a queue of outstanding I/O requests implemented as a + * circular producer-consumer ring with free-running buffers. + * to allow reordering, this ring indirects to indexes in an + * ring of io_structs. + * + * the block_* calls may either add an entry to this ring and return, + * or satisfy the request immediately and call the callback directly. + * None of the io calls in parallax should be nested enough to worry + * about stack problems with this approach. + */ + +struct read_args { u64 addr; -}; - -struct write_args { +}; + +struct write_args { u64 addr; char *block; -}; - -struct alloc_args { +}; + +struct alloc_args { char *block; -}; - -struct pending_io_req { +}; + +struct pending_io_req { enum {IO_READ, IO_WRITE, IO_ALLOC, IO_RWAKE, IO_WWAKE} op; union { struct read_args r; @@ -52,10 +52,10 @@ } u; io_cb_t cb; void *param; -}; - -void radix_lock_init(struct radix_lock *r) -{ +}; + +void radix_lock_init(struct radix_lock *r) +{ int i; pthread_mutex_init(&r->lock, NULL); @@ -64,38 +64,38 @@ r->waiters[i] = NULL; r->state[i] = ANY; } -} - -/* maximum outstanding I/O requests issued asynchronously */ -/* must be a power of 2.*/ +} + +/* maximum outstanding I/O requests issued asynchronously */ +/* must be a power of 2.*/ #define MAX_PENDING_IO 1024 - -/* how many threads to concurrently issue I/O to the disk. */ + +/* how many threads to concurrently issue I/O to the disk. 
*/ #define IO_POOL_SIZE 10 - -static struct pending_io_req pending_io_reqs[MAX_PENDING_IO]; -static int pending_io_list[MAX_PENDING_IO]; -static unsigned long io_prod = 0, io_cons = 0, io_free = 0; -#define PENDING_IO_MASK(_x) ((_x) & (MAX_PENDING_IO - 1)) -#define PENDING_IO_IDX(_x) ((_x) - pending_io_reqs) -#define PENDING_IO_ENT(_x) \ - (&pending_io_reqs[pending_io_list[PENDING_IO_MASK(_x)]]) -#define CAN_PRODUCE_PENDING_IO ((io_free + MAX_PENDING_IO) != io_prod) -#define CAN_CONSUME_PENDING_IO (io_cons != io_prod) -static pthread_mutex_t pending_io_lock = PTHREAD_MUTEX_INITIALIZER; -static pthread_cond_t pending_io_cond = PTHREAD_COND_INITIALIZER; - -static void init_pending_io(void) -{ + +static struct pending_io_req pending_io_reqs[MAX_PENDING_IO]; +static int pending_io_list[MAX_PENDING_IO]; +static unsigned long io_prod = 0, io_cons = 0, io_free = 0; +#define PENDING_IO_MASK(_x) ((_x) & (MAX_PENDING_IO - 1)) +#define PENDING_IO_IDX(_x) ((_x) - pending_io_reqs) +#define PENDING_IO_ENT(_x) \ + (&pending_io_reqs[pending_io_list[PENDING_IO_MASK(_x)]]) +#define CAN_PRODUCE_PENDING_IO ((io_free + MAX_PENDING_IO) != io_prod) +#define CAN_CONSUME_PENDING_IO (io_cons != io_prod) +static pthread_mutex_t pending_io_lock = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t pending_io_cond = PTHREAD_COND_INITIALIZER; + +static void init_pending_io(void) +{ int i; - + for (i=0; i<MAX_PENDING_IO; i++) pending_io_list[i] = i; - -} - -void block_read(u64 addr, io_cb_t cb, void *param) -{ + +} + +void block_read(u64 addr, io_cb_t cb, void *param) +{ struct pending_io_req *req; pthread_mutex_lock(&pending_io_lock); @@ -108,13 +108,13 @@ req->cb = cb; req->param = param; - pthread_cond_signal(&pending_io_cond); + pthread_cond_signal(&pending_io_cond); pthread_mutex_unlock(&pending_io_lock); -} - - -void block_write(u64 addr, char *block, io_cb_t cb, void *param) -{ +} + + +void block_write(u64 addr, char *block, io_cb_t cb, void *param) +{ struct pending_io_req *req; pthread_mutex_lock(&pending_io_lock); @@ -128,15 +128,15 @@ req->cb = cb; req->param = param; - pthread_cond_signal(&pending_io_cond); + pthread_cond_signal(&pending_io_cond); pthread_mutex_unlock(&pending_io_lock); -} - - -void block_alloc(char *block, io_cb_t cb, void *param) -{ - struct pending_io_req *req; - +} + + +void block_alloc(char *block, io_cb_t cb, void *param) +{ + struct pending_io_req *req; + pthread_mutex_lock(&pending_io_lock); assert(CAN_PRODUCE_PENDING_IO); @@ -146,12 +146,12 @@ req->cb = cb; req->param = param; - pthread_cond_signal(&pending_io_cond); + pthread_cond_signal(&pending_io_cond); pthread_mutex_unlock(&pending_io_lock); -} - -void block_rlock(struct radix_lock *r, int row, io_cb_t cb, void *param) -{ +} + +void block_rlock(struct radix_lock *r, int row, io_cb_t cb, void *param) +{ struct io_ret ret; pthread_mutex_lock(&r->lock); @@ -179,11 +179,11 @@ pthread_mutex_unlock(&r->lock); return; } -} - - -void block_wlock(struct radix_lock *r, int row, io_cb_t cb, void *param) -{ +} + + +void block_wlock(struct radix_lock *r, int row, io_cb_t cb, void *param) +{ struct io_ret ret; pthread_mutex_lock(&r->lock); @@ -212,12 +212,12 @@ pthread_mutex_unlock(&r->lock); return; } - -} - -/* called with radix_lock locked and lock count of zero. */ -static void wake_waiters(struct radix_lock *r, int row) -{ + +} + +/* called with radix_lock locked and lock count of zero. 
*/ +static void wake_waiters(struct radix_lock *r, int row) +{ struct pending_io_req *req; struct radix_wait *rw; @@ -263,14 +263,14 @@ } pthread_mutex_lock(&pending_io_lock); - pthread_cond_signal(&pending_io_cond); + pthread_cond_signal(&pending_io_cond); pthread_mutex_unlock(&pending_io_lock); -} - -void block_runlock(struct radix_lock *r, int row, io_cb_t cb, void *param) -{ +} + +void block_runlock(struct radix_lock *r, int row, io_cb_t cb, void *param) +{ struct io_ret ret; - + pthread_mutex_lock(&r->lock); assert(r->lines[row] > 0); /* try to catch misuse. */ r->lines[row]--; @@ -280,10 +280,10 @@ } pthread_mutex_unlock(&r->lock); cb(ret, param); -} - -void block_wunlock(struct radix_lock *r, int row, io_cb_t cb, void *param) -{ +} + +void block_wunlock(struct radix_lock *r, int row, io_cb_t cb, void *param) +{ struct io_ret ret; pthread_mutex_lock(&r->lock); @@ -293,11 +293,11 @@ wake_waiters(r, row); pthread_mutex_unlock(&r->lock); cb(ret, param); -} - -/* consumer calls */ -static void do_next_io_req(struct pending_io_req *req) -{ +} + +/* consumer calls */ +static void do_next_io_req(struct pending_io_req *req) +{ struct io_ret ret; void *param; @@ -334,60 +334,60 @@ pthread_mutex_lock(&pending_io_lock); pending_io_list[PENDING_IO_MASK(io_free++)] = PENDING_IO_IDX(req); pthread_mutex_unlock(&pending_io_lock); - + assert(req->cb != NULL); req->cb(ret, param); -} - -void *io_thread(void *param) -{ +} + +void *io_thread(void *param) +{ int tid; struct pending_io_req *req; /* Set this thread's tid. */ - tid = *(int *)param; - free(param); - -start: - pthread_mutex_lock(&pending_io_lock); - while (io_prod == io_cons) { - pthread_cond_wait(&pending_io_cond, &pending_io_lock); - } - - if (io_prod == io_cons) { - /* unnecessary wakeup. */ - pthread_mutex_unlock(&pending_io_lock); - goto start; - } - + tid = *(int *)param; + free(param); + +start: + pthread_mutex_lock(&pending_io_lock); + while (io_prod == io_cons) { + pthread_cond_wait(&pending_io_cond, &pending_io_lock); + } + + if (io_prod == io_cons) { + /* unnecessary wakeup. */ + pthread_mutex_unlock(&pending_io_lock); + goto start; + } + req = PENDING_IO_ENT(io_cons++); pthread_mutex_unlock(&pending_io_lock); - - do_next_io_req(req); - + + do_next_io_req(req); + goto start; - -} - -static pthread_t io_pool[IO_POOL_SIZE]; -void start_io_threads(void) - -{ + +} + +static pthread_t io_pool[IO_POOL_SIZE]; +void start_io_threads(void) + +{ int i, tid=0; for (i=0; i < IO_POOL_SIZE; i++) { - int ret, *t; - t = (int *)malloc(sizeof(int)); - *t = tid++; - ret = pthread_create(&io_pool[i], NULL, io_thread, t); - if (ret != 0) printf("Error starting thread %d\n", i); - } - -} - -void init_block_async(void) -{ + int ret, *t; + t = (int *)malloc(sizeof(int)); + *t = tid++; + ret = pthread_create(&io_pool[i], NULL, io_thread, t); + if (ret != 0) printf("Error starting thread %d\n", i); + } + +} + +void init_block_async(void) +{ init_pending_io(); start_io_threads(); -} +} diff -r fc4e10d0455a -r 99ff7c3435b2 tools/blktap/requests-async.h --- a/tools/blktap/requests-async.h Sat Jul 2 22:37:55 2005 +++ b/tools/blktap/requests-async.h Sun Jul 3 12:02:01 2005 @@ -1,24 +1,24 @@ -#ifndef _REQUESTSASYNC_H_ -#define _REQUESTSASYNC_H_ - -#include "block-async.h" -#include "blockstore.h" /* for newblock etc. 
*/ - -/* -#define BLOCK_SIZE 4096 -#define ZERO 0ULL -#define getid(x) (((x)>>1)&0x7fffffffffffffffLLU) -#define iswritable(x) (((x) & 1LLU) != 0) -#define writable(x) (((x) << 1) | 1LLU) -#define readonly(x) ((u64)((x) << 1)) -*/ - +#ifndef _REQUESTSASYNC_H_ +#define _REQUESTSASYNC_H_ + +#include "block-async.h" +#include "blockstore.h" /* for newblock etc. */ + +/* +#define BLOCK_SIZE 4096 +#define ZERO 0ULL +#define getid(x) (((x)>>1)&0x7fffffffffffffffLLU) +#define iswritable(x) (((x) & 1LLU) != 0) +#define writable(x) (((x) << 1) | 1LLU) +#define readonly(x) ((u64)((x) << 1)) +*/ + #define VADDR_MASK 0x0000000003ffffffLLU /* 26-bits = 256Gig */ #define VALID_VADDR(x) (((x) & VADDR_MASK) == (x)) int vdi_read (vdi_t *vdi, u64 vaddr, io_cb_t cb, void *param); int vdi_write(vdi_t *vdi, u64 vaddr, char *block, io_cb_t cb, void *param); - + /* synchronous versions: */ char *vdi_read_s (vdi_t *vdi, u64 vaddr); int vdi_write_s(vdi_t *vdi, u64 vaddr, char *block); @@ -26,4 +26,4 @@ #define ERR_BAD_VADDR -1 #define ERR_NOMEM -2 -#endif //_REQUESTSASYNC_H_ +#endif //_REQUESTSASYNC_H_ diff -r fc4e10d0455a -r 99ff7c3435b2 tools/blktap/requests-async.c --- a/tools/blktap/requests-async.c Sat Jul 2 22:37:55 2005 +++ b/tools/blktap/requests-async.c Sun Jul 3 12:02:01 2005 @@ -1,55 +1,55 @@ /* requests-async.c - * + * * asynchronous request dispatcher for radix access in parallax. - */ - -#include <stdio.h> -#include <stdlib.h> -#include <string.h> + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> #include <ctype.h> -#include <assert.h> -#include <pthread.h> +#include <assert.h> +#include <pthread.h> #include <err.h> #include <zlib.h> /* for crc32() */ -#include "requests-async.h" -#include "vdi.h" -#include "radix.h" - -#define L1_IDX(_a) (((_a) & 0x0000000007fc0000ULL) >> 18) -#define L2_IDX(_a) (((_a) & 0x000000000003fe00ULL) >> 9) -#define L3_IDX(_a) (((_a) & 0x00000000000001ffULL)) - - -#if 0 -#define DPRINTF(_f, _a...) printf ( _f , ## _a ) -#else -#define DPRINTF(_f, _a...) ((void)0) -#endif - +#include "requests-async.h" +#include "vdi.h" +#include "radix.h" + +#define L1_IDX(_a) (((_a) & 0x0000000007fc0000ULL) >> 18) +#define L2_IDX(_a) (((_a) & 0x000000000003fe00ULL) >> 9) +#define L3_IDX(_a) (((_a) & 0x00000000000001ffULL)) + + +#if 0 +#define DPRINTF(_f, _a...) printf ( _f , ## _a ) +#else +#define DPRINTF(_f, _a...) ((void)0) +#endif + struct block_info { u32 crc; u32 unused; }; - -struct io_req { - enum { IO_OP_READ, IO_OP_WRITE } op; - u64 root; - u64 vaddr; - int state; - io_cb_t cb; - void *param; - struct radix_lock *lock; - - /* internal stuff: */ + +struct io_req { + enum { IO_OP_READ, IO_OP_WRITE } op; + u64 root; + u64 vaddr; + int state; + io_cb_t cb; + void *param; + struct radix_lock *lock; + + /* internal stuff: */ struct io_ret retval;/* holds the return while we unlock. 
*/ char *block; /* the block to write */ radix_tree_node radix[3]; u64 radix_addr[3]; struct block_info bi; -}; - -void clear_w_bits(radix_tree_node node) -{ +}; + +void clear_w_bits(radix_tree_node node) +{ int i; for (i=0; i<RADIX_TREE_MAP_ENTRIES; i++) node[i] = node[i] & ONEMASK; @@ -62,111 +62,111 @@ for (i=0; i<RADIX_TREE_MAP_ENTRIES; i+=2) node[i] = node[i] & ONEMASK; return; -} - -enum states { - /* both */ - READ_L1, - READ_L2, - READ_L3, - - /* read */ - READ_LOCKED, - READ_DATA, - READ_UNLOCKED, - RETURN_ZERO, - - /* write */ - WRITE_LOCKED, - WRITE_DATA, +} + +enum states { + /* both */ + READ_L1, + READ_L2, + READ_L3, + + /* read */ + READ_LOCKED, + READ_DATA, + READ_UNLOCKED, + RETURN_ZERO, + + /* write */ + WRITE_LOCKED, + WRITE_DATA, WRITE_L3, - WRITE_UNLOCKED, - - /* L3 Zero Path */ - ALLOC_DATA_L3z, - WRITE_L3_L3z, - - /* L3 Fault Path */ - ALLOC_DATA_L3f, - WRITE_L3_L3f, - - /* L2 Zero Path */ - ALLOC_DATA_L2z, - WRITE_L2_L2z, - ALLOC_L3_L2z, - WRITE_L2_L3z, - - /* L2 Fault Path */ - READ_L3_L2f, - ALLOC_DATA_L2f, - WRITE_L2_L2f, - ALLOC_L3_L2f, - WRITE_L2_L3f, - + WRITE_UNLOCKED, + + /* L3 Zero Path */ + ALLOC_DATA_L3z, + WRITE_L3_L3z, + + /* L3 Fault Path */ + ALLOC_DATA_L3f, + WRITE_L3_L3f, + + /* L2 Zero Path */ + ALLOC_DATA_L2z, + WRITE_L2_L2z, + ALLOC_L3_L2z, + WRITE_L2_L3z, + + /* L2 Fault Path */ + READ_L3_L2f, + ALLOC_DATA_L2f, + WRITE_L2_L2f, + ALLOC_L3_L2f, + WRITE_L2_L3f, + /* L1 Zero Path */ - ALLOC_DATA_L1z, - ALLOC_L3_L1z, - ALLOC_L2_L1z, - WRITE_L1_L1z, - + ALLOC_DATA_L1z, + ALLOC_L3_L1z, + ALLOC_L2_L1z, + WRITE_L1_L1z, + /* L1 Fault Path */ READ_L2_L1f, READ_L3_L1f, - ALLOC_DATA_L1f, - ALLOC_L3_L1f, - ALLOC_L2_L1f, - WRITE_L1_L1f, - -}; - -enum radix_offsets { - L1 = 0, - L2 = 1, - L3 = 2 -}; - - -static void read_cb(struct io_ret ret, void *param); -static void write_cb(struct io_ret ret, void *param); - + ALLOC_DATA_L1f, + ALLOC_L3_L1f, + ALLOC_L2_L1f, + WRITE_L1_L1f, + +}; + +enum radix_offsets { + L1 = 0, + L2 = 1, + L3 = 2 +}; + + +static void read_cb(struct io_ret ret, void *param); +static void write_cb(struct io_ret ret, void *param); + int vdi_read(vdi_t *vdi, u64 vaddr, io_cb_t cb, void *param) -{ - struct io_req *req; - +{ + struct io_req *req; + if (!VALID_VADDR(vaddr)) return ERR_BAD_VADDR; /* Every second line in the bottom-level radix tree is used to */ /* store crc32 values etc. We shift the vadder here to achied this. */ vaddr <<= 1; - - req = (struct io_req *)malloc(sizeof (struct io_req)); + + req = (struct io_req *)malloc(sizeof (struct io_req)); if (req == NULL) return ERR_NOMEM; - + req->radix[0] = req->radix[1] = req->radix[2] = NULL; - req->op = IO_OP_READ; - req->root = vdi->radix_root; - req->lock = vdi->radix_lock; - req->vaddr = vaddr; - req->cb = cb; - req->param = param; - req->state = READ_LOCKED; - + req->op = IO_OP_READ; + req->root = vdi->radix_root; + req->lock = vdi->radix_lock; + req->vaddr = vaddr; + req->cb = cb; + req->param = param; + req->state = READ_LOCKED; + block_rlock(req->lock, L1_IDX(vaddr), read_cb, req); - - return 0; -} - - + + return 0; +} + + int vdi_write(vdi_t *vdi, u64 vaddr, char *block, io_cb_t cb, void *param) -{ - struct io_req *req; - +{ + struct io_req *req; + if (!VALID_VADDR(vaddr)) return ERR_BAD_VADDR; /* Every second line in the bottom-level radix tree is used to */ /* store crc32 values etc. We shift the vadder here to achied this. 
*/ vaddr <<= 1; - - req = (struct io_req *)malloc(sizeof (struct io_req)); + + req = (struct io_req *)malloc(sizeof (struct io_req)); if (req == NULL) return ERR_NOMEM; req->radix[0] = req->radix[1] = req->radix[2] = NULL; @@ -180,97 +180,97 @@ req->bi.crc = (u32) crc32(0L, Z_NULL, 0); req->bi.crc = (u32) crc32(req->bi.crc, block, BLOCK_SIZE); req->bi.unused = 0xdeadbeef; - + req->cb = cb; req->param = param; - req->radix_addr[L1] = getid(req->root); /* for consistency */ + req->radix_addr[L1] = getid(req->root); /* for consistency */ req->state = WRITE_LOCKED; - + block_wlock(req->lock, L1_IDX(vaddr), write_cb, req); - - + + return 0; -} - +} + static void read_cb(struct io_ret ret, void *param) -{ - struct io_req *req = (struct io_req *)param; - radix_tree_node node; - u64 idx; - char *block; - void *req_param; - - DPRINTF("read_cb\n"); - /* get record */ - switch(req->state) { - - case READ_LOCKED: - - DPRINTF("READ_LOCKED\n"); - req->state = READ_L1; - block_read(getid(req->root), read_cb, req); - break; - - case READ_L1: /* block is the radix root */ - - DPRINTF("READ_L1\n"); - block = IO_BLOCK(ret); - if (block == NULL) goto fail; - node = (radix_tree_node) block; - idx = getid( node[L1_IDX(req->vaddr)] ); - free(block); - if ( idx == ZERO ) { +{ + struct io_req *req = (struct io_req *)param; + radix_tree_node node; + u64 idx; + char *block; + void *req_param; + + DPRINTF("read_cb\n"); + /* get record */ + switch(req->state) { + + case READ_LOCKED: + + DPRINTF("READ_LOCKED\n"); + req->state = READ_L1; + block_read(getid(req->root), read_cb, req); + break; + + case READ_L1: /* block is the radix root */ + + DPRINTF("READ_L1\n"); + block = IO_BLOCK(ret); + if (block == NULL) goto fail; + node = (radix_tree_node) block; + idx = getid( node[L1_IDX(req->vaddr)] ); + free(block); + if ( idx == ZERO ) { req->state = RETURN_ZERO; block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req); - } else { + } else { req->state = READ_L2; block_read(idx, read_cb, req); - } - break; - - case READ_L2: - - DPRINTF("READ_L2\n"); - block = IO_BLOCK(ret); - if (block == NULL) goto fail; - node = (radix_tree_node) block; - idx = getid( node[L2_IDX(req->vaddr)] ); - free(block); - if ( idx == ZERO ) { + } + break; + + case READ_L2: + + DPRINTF("READ_L2\n"); + block = IO_BLOCK(ret); + if (block == NULL) goto fail; + node = (radix_tree_node) block; + idx = getid( node[L2_IDX(req->vaddr)] ); + free(block); + if ( idx == ZERO ) { req->state = RETURN_ZERO; block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req); - } else { + } else { req->state = READ_L3; block_read(idx, read_cb, req); - } - break; - - case READ_L3: + } + break; + + case READ_L3: { struct block_info *bi; - DPRINTF("READ_L3\n"); - block = IO_BLOCK(ret); - if (block == NULL) goto fail; - node = (radix_tree_node) block; - idx = getid( node[L3_IDX(req->vaddr)] ); + DPRINTF("READ_L3\n"); + block = IO_BLOCK(ret); + if (block == NULL) goto fail; + node = (radix_tree_node) block; + idx = getid( node[L3_IDX(req->vaddr)] ); bi = (struct block_info *) &node[L3_IDX(req->vaddr) + 1]; req->bi = *bi; - free(block); - if ( idx == ZERO ) { + free(block); + if ( idx == ZERO ) { req->state = RETURN_ZERO; block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req); - } else { + } else { req->state = READ_DATA; block_read(idx, read_cb, req); - } - break; - } - case READ_DATA: + } + break; + } + case READ_DATA: { u32 crc; - DPRINTF("READ_DATA\n"); + DPRINTF("READ_DATA\n"); block = IO_BLOCK(ret); if (block == NULL) goto fail; @@ -301,151 +301,151 @@ /* goto fail; 
*/ } - req->retval = ret; - req->state = READ_UNLOCKED; - block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req); - break; - } - case READ_UNLOCKED: + req->retval = ret; + req->state = READ_UNLOCKED; + block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req); + break; + } + case READ_UNLOCKED: { struct io_ret r; io_cb_t cb; - DPRINTF("READ_UNLOCKED\n"); - req_param = req->param; - r = req->retval; - cb = req->cb; - free(req); - cb(r, req_param); - break; - } - - case RETURN_ZERO: + DPRINTF("READ_UNLOCKED\n"); + req_param = req->param; + r = req->retval; + cb = req->cb; + free(req); + cb(r, req_param); + break; + } + + case RETURN_ZERO: { struct io_ret r; io_cb_t cb; DPRINTF("RETURN_ZERO\n"); req_param = req->param; - cb = req->cb; + cb = req->cb; free(req); - r.type = IO_BLOCK_T; - r.u.b = newblock(); + r.type = IO_BLOCK_T; + r.u.b = newblock(); cb(r, req_param); break; } - - default: - DPRINTF("*** Write: Bad state! (%d) ***\n", req->state); - goto fail; - } - - return; - - fail: + + default: + DPRINTF("*** Write: Bad state! (%d) ***\n", req->state); + goto fail; + } + + return; + + fail: { struct io_ret r; io_cb_t cb; DPRINTF("asyn_read had a read error.\n"); - req_param = req->param; - r = ret; - cb = req->cb; - free(req); - cb(r, req_param); - } - - -} - + req_param = req->param; + r = ret; + cb = req->cb; + free(req); + cb(r, req_param); + } + + +} + static void write_cb(struct io_ret r, void *param) -{ - struct io_req *req = (struct io_req *)param; - radix_tree_node node; - u64 a, addr; - void *req_param; +{ + struct io_req *req = (struct io_req *)param; + radix_tree_node node; + u64 a, addr; + void *req_param; struct block_info *bi; - - switch(req->state) { - - case WRITE_LOCKED: - - DPRINTF("WRITE_LOCKED (%llu)\n", L1_IDX(req->vaddr)); - req->state = READ_L1; - block_read(getid(req->root), write_cb, req); - break; - - case READ_L1: /* block is the radix root */ - - DPRINTF("READ_L1\n"); - node = (radix_tree_node) IO_BLOCK(r); - if (node == NULL) goto fail; - a = node[L1_IDX(req->vaddr)]; - addr = getid(a); - - req->radix_addr[L2] = addr; - req->radix[L1] = node; - - if ( addr == ZERO ) { + + switch(req->state) { + + case WRITE_LOCKED: + + DPRINTF("WRITE_LOCKED (%llu)\n", L1_IDX(req->vaddr)); + req->state = READ_L1; + block_read(getid(req->root), write_cb, req); + break; + + case READ_L1: /* block is the radix root */ + + DPRINTF("READ_L1\n"); + node = (radix_tree_node) IO_BLOCK(r); + if (node == NULL) goto fail; + a = node[L1_IDX(req->vaddr)]; + addr = getid(a); + + req->radix_addr[L2] = addr; + req->radix[L1] = node; + + if ( addr == ZERO ) { /* L1 empty subtree: */ req->state = ALLOC_DATA_L1z; block_alloc( req->block, write_cb, req ); - } else if ( !iswritable(a) ) { - /* L1 fault: */ - req->state = READ_L2_L1f; - block_read( addr, write_cb, req ); - } else { - req->state = READ_L2; - block_read( addr, write_cb, req ); - } - break; - - case READ_L2: - - DPRINTF("READ_L2\n"); - node = (radix_tree_node) IO_BLOCK(r); - if (node == NULL) goto fail; - a = node[L2_IDX(req->vaddr)]; - addr = getid(a); - - req->radix_addr[L3] = addr; - req->radix[L2] = node; - - if ( addr == ZERO ) { + } else if ( !iswritable(a) ) { + /* L1 fault: */ + req->state = READ_L2_L1f; + block_read( addr, write_cb, req ); + } else { + req->state = READ_L2; + block_read( addr, write_cb, req ); + } + break; + + case READ_L2: + + DPRINTF("READ_L2\n"); + node = (radix_tree_node) IO_BLOCK(r); + if (node == NULL) goto fail; + a = node[L2_IDX(req->vaddr)]; + addr = getid(a); + + req->radix_addr[L3] = addr; + 
req->radix[L2] = node; + + if ( addr == ZERO ) { /* L2 empty subtree: */ - req->state = ALLOC_DATA_L2z; - block_alloc( req->block, write_cb, req ); - } else if ( !iswritable(a) ) { - /* L2 fault: */ - req->state = READ_L3_L2f; - block_read( addr, write_cb, req ); - } else { - req->state = READ_L3; - block_read( addr, write_cb, req ); - } - break; - - case READ_L3: - - DPRINTF("READ_L3\n"); - node = (radix_tree_node) IO_BLOCK(r); - if (node == NULL) goto fail; - a = node[L3_IDX(req->vaddr)]; - addr = getid(a); - - req->radix[L3] = node; - - if ( addr == ZERO ) { - /* L3 fault: */ - req->state = ALLOC_DATA_L3z; - block_alloc( req->block, write_cb, req ); - } else if ( !iswritable(a) ) { - /* L3 fault: */ - req->state = ALLOC_DATA_L3f; - block_alloc( req->block, write_cb, req ); - } else { - req->state = WRITE_DATA; - block_write( addr, req->block, write_cb, req ); - } - break; - + req->state = ALLOC_DATA_L2z; + block_alloc( req->block, write_cb, req ); + } else if ( !iswritable(a) ) { + /* L2 fault: */ + req->state = READ_L3_L2f; + block_read( addr, write_cb, req ); + } else { + req->state = READ_L3; + block_read( addr, write_cb, req ); + } + break; + + case READ_L3: + + DPRINTF("READ_L3\n"); + node = (radix_tree_node) IO_BLOCK(r); + if (node == NULL) goto fail; + a = node[L3_IDX(req->vaddr)]; + addr = getid(a); + + req->radix[L3] = node; + + if ( addr == ZERO ) { + /* L3 fault: */ + req->state = ALLOC_DATA_L3z; + block_alloc( req->block, write_cb, req ); + } else if ( !iswritable(a) ) { + /* L3 fault: */ + req->state = ALLOC_DATA_L3f; + block_alloc( req->block, write_cb, req ); + } else { + req->state = WRITE_DATA; + block_write( addr, req->block, write_cb, req ); + } + break; + case WRITE_DATA: DPRINTF("WRITE_DATA\n"); @@ -459,268 +459,268 @@ block_write(req->radix_addr[L3], (char*)req->radix[L3], write_cb, req); break; - /* L3 Zero Path: */ - - case ALLOC_DATA_L3z: - - DPRINTF("ALLOC_DATA_L3z\n"); - addr = IO_ADDR(r); - a = writable(addr); - req->radix[L3][L3_IDX(req->vaddr)] = a; + /* L3 Zero Path: */ + + case ALLOC_DATA_L3z: + + DPRINTF("ALLOC_DATA_L3z\n"); + addr = IO_ADDR(r); + a = writable(addr); + req->radix[L3][L3_IDX(req->vaddr)] = a; bi = (struct block_info *) &req->radix[L3][L3_IDX(req->vaddr)+1]; req->bi.unused = 102; *bi = req->bi; - req->state = WRITE_L3_L3z; - block_write(req->radix_addr[L3], (char*)req->radix[L3], write_cb, req); - break; - - /* L3 Fault Path: */ - - case ALLOC_DATA_L3f: - - DPRINTF("ALLOC_DATA_L3f\n"); - addr = IO_ADDR(r); - a = writable(addr); - req->radix[L3][L3_IDX(req->vaddr)] = a; + req->state = WRITE_L3_L3z; + block_write(req->radix_addr[L3], (char*)req->radix[L3], write_cb, req); + break; + + /* L3 Fault Path: */ + + case ALLOC_DATA_L3f: + + DPRINTF("ALLOC_DATA_L3f\n"); + addr = IO_ADDR(r); + a = writable(addr); + req->radix[L3][L3_IDX(req->vaddr)] = a; bi = (struct block_info *) &req->radix[L3][L3_IDX(req->vaddr)+1]; req->bi.unused = 103; *bi = req->bi; - req->state = WRITE_L3_L3f; - block_write(req->radix_addr[L3], (char*)req->radix[L3], write_cb, req); - break; - - /* L2 Zero Path: */ - - case ALLOC_DATA_L2z: - - DPRINTF("ALLOC_DATA_L2z\n"); - addr = IO_ADDR(r); - a = writable(addr); - req->radix[L3] = newblock(); - req->radix[L3][L3_IDX(req->vaddr)] = a; + req->state = WRITE_L3_L3f; + block_write(req->radix_addr[L3], (char*)req->radix[L3], write_cb, req); + break; + + /* L2 Zero Path: */ + + case ALLOC_DATA_L2z: + + DPRINTF("ALLOC_DATA_L2z\n"); + addr = IO_ADDR(r); + a = writable(addr); + req->radix[L3] = newblock(); + 
req->radix[L3][L3_IDX(req->vaddr)] = a; bi = (struct block_info *) &req->radix[L3][L3_IDX(req->vaddr)+1]; req->bi.unused = 104; *bi = req->bi; - req->state = ALLOC_L3_L2z; - block_alloc( (char*)req->radix[L3], write_cb, req ); - break; - - case ALLOC_L3_L2z: - - DPRINTF("ALLOC_L3_L2z\n"); - addr = IO_ADDR(r); - a = writable(addr); - req->radix[L2][L2_IDX(req->vaddr)] = a; - req->state = WRITE_L2_L2z; - block_write(req->radix_addr[L2], (char*)req->radix[L2], write_cb, req); - break; - - /* L2 Fault Path: */ - - case READ_L3_L2f: - - DPRINTF("READ_L3_L2f\n"); - node = (radix_tree_node) IO_BLOCK(r); + req->state = ALLOC_L3_L2z; + block_alloc( (char*)req->radix[L3], write_cb, req ); + break; + + case ALLOC_L3_L2z: + + DPRINTF("ALLOC_L3_L2z\n"); + addr = IO_ADDR(r); + a = writable(addr); + req->radix[L2][L2_IDX(req->vaddr)] = a; + req->state = WRITE_L2_L2z; + block_write(req->radix_addr[L2], (char*)req->radix[L2], write_cb, req); + break; + + /* L2 Fault Path: */ + + case READ_L3_L2f: + + DPRINTF("READ_L3_L2f\n"); + node = (radix_tree_node) IO_BLOCK(r); clear_L3_w_bits(node); - if (node == NULL) goto fail; - a = node[L2_IDX(req->vaddr)]; - addr = getid(a); - - req->radix[L3] = node; + if (node == NULL) goto fail; + a = node[L2_IDX(req->vaddr)]; + addr = getid(a); + + req->radix[L3] = node; req->state = ALLOC_DATA_L2f; - block_alloc( req->block, write_cb, req ); - break; - - case ALLOC_DATA_L2f: - - DPRINTF("ALLOC_DATA_L2f\n"); - addr = IO_ADDR(r); - a = writable(addr); - req->radix[L3][L3_IDX(req->vaddr)] = a; + block_alloc( req->block, write_cb, req ); + break; + + case ALLOC_DATA_L2f: + + DPRINTF("ALLOC_DATA_L2f\n"); + addr = IO_ADDR(r); + a = writable(addr); + req->radix[L3][L3_IDX(req->vaddr)] = a; bi = (struct block_info *) &req->radix[L3][L3_IDX(req->vaddr)+1]; req->bi.unused = 105; *bi = req->bi; - req->state = ALLOC_L3_L2f; - block_alloc( (char*)req->radix[L3], write_cb, req ); - break; - - case ALLOC_L3_L2f: - - DPRINTF("ALLOC_L3_L2f\n"); - addr = IO_ADDR(r); - a = writable(addr); - req->radix[L2][L2_IDX(req->vaddr)] = a; - req->state = WRITE_L2_L2f; - block_write(req->radix_addr[L2], (char*)req->radix[L2], write_cb, req); - break; - - /* L1 Zero Path: */ - - case ALLOC_DATA_L1z: - - DPRINTF("ALLOC_DATA_L1z\n"); - addr = IO_ADDR(r); - a = writable(addr); - req->radix[L3] = newblock(); - req->radix[L3][L3_IDX(req->vaddr)] = a; + req->state = ALLOC_L3_L2f; + block_alloc( (char*)req->radix[L3], write_cb, req ); + break; + + case ALLOC_L3_L2f: + + DPRINTF("ALLOC_L3_L2f\n"); + addr = IO_ADDR(r); + a = writable(addr); + req->radix[L2][L2_IDX(req->vaddr)] = a; + req->state = WRITE_L2_L2f; + block_write(req->radix_addr[L2], (char*)req->radix[L2], write_cb, req); + break; + + /* L1 Zero Path: */ + + case ALLOC_DATA_L1z: + + DPRINTF("ALLOC_DATA_L1z\n"); + addr = IO_ADDR(r); + a = writable(addr); + req->radix[L3] = newblock(); + req->radix[L3][L3_IDX(req->vaddr)] = a; bi = (struct block_info *) &req->radix[L3][L3_IDX(req->vaddr)+1]; req->bi.unused = 106; *bi = req->bi; - req->state = ALLOC_L3_L1z; - block_alloc( (char*)req->radix[L3], write_cb, req ); - break; - - case ALLOC_L3_L1z: - - DPRINTF("ALLOC_L3_L1z\n"); - addr = IO_ADDR(r); - a = writable(addr); - req->radix[L2] = newblock(); - req->radix[L2][L2_IDX(req->vaddr)] = a; - req->state = ALLOC_L2_L1z; - block_alloc( (char*)req->radix[L2], write_cb, req ); - break; - - case ALLOC_L2_L1z: - - DPRINTF("ALLOC_L2_L1z\n"); - addr = IO_ADDR(r); - a = writable(addr); - req->radix[L1][L1_IDX(req->vaddr)] = a; - req->state = WRITE_L1_L1z; - 
block_write(req->radix_addr[L1], (char*)req->radix[L1], write_cb, req); - break; - - /* L1 Fault Path: */ - - case READ_L2_L1f: - - DPRINTF("READ_L2_L1f\n"); - node = (radix_tree_node) IO_BLOCK(r); - clear_w_bits(node); - if (node == NULL) goto fail; - a = node[L2_IDX(req->vaddr)]; - addr = getid(a); - - req->radix_addr[L3] = addr; - req->radix[L2] = node; - - if (addr == ZERO) { + req->state = ALLOC_L3_L1z; + block_alloc( (char*)req->radix[L3], write_cb, req ); + break; + + case ALLOC_L3_L1z: + + DPRINTF("ALLOC_L3_L1z\n"); + addr = IO_ADDR(r); + a = writable(addr); + req->radix[L2] = newblock(); + req->radix[L2][L2_IDX(req->vaddr)] = a; + req->state = ALLOC_L2_L1z; + block_alloc( (char*)req->radix[L2], write_cb, req ); + break; + + case ALLOC_L2_L1z: + + DPRINTF("ALLOC_L2_L1z\n"); + addr = IO_ADDR(r); + a = writable(addr); + req->radix[L1][L1_IDX(req->vaddr)] = a; + req->state = WRITE_L1_L1z; + block_write(req->radix_addr[L1], (char*)req->radix[L1], write_cb, req); + break; + + /* L1 Fault Path: */ + + case READ_L2_L1f: + + DPRINTF("READ_L2_L1f\n"); + node = (radix_tree_node) IO_BLOCK(r); + clear_w_bits(node); + if (node == NULL) goto fail; + a = node[L2_IDX(req->vaddr)]; + addr = getid(a); + + req->radix_addr[L3] = addr; + req->radix[L2] = node; + + if (addr == ZERO) { /* nothing below L2, create an empty L3 and alloc data. */ /* (So skip READ_L3_L1f.) */ req->radix[L3] = newblock(); req->state = ALLOC_DATA_L1f; block_alloc( req->block, write_cb, req ); - } else { + } else { req->state = READ_L3_L1f; block_read( addr, write_cb, req ); - } - break; - - case READ_L3_L1f: - - DPRINTF("READ_L3_L1f\n"); - node = (radix_tree_node) IO_BLOCK(r); + } + break; + + case READ_L3_L1f: + + DPRINTF("READ_L3_L1f\n"); + node = (radix_tree_node) IO_BLOCK(r); clear_L3_w_bits(node); - if (node == NULL) goto fail; - a = node[L2_IDX(req->vaddr)]; - addr = getid(a); - - req->radix[L3] = node; + if (node == NULL) goto fail; + a = node[L2_IDX(req->vaddr)]; + addr = getid(a); + + req->radix[L3] = node; req->state = ALLOC_DATA_L1f; - block_alloc( req->block, write_cb, req ); - break; - - case ALLOC_DATA_L1f: - - DPRINTF("ALLOC_DATA_L1f\n"); - addr = IO_ADDR(r); - a = writable(addr); - req->radix[L3][L3_IDX(req->vaddr)] = a; + block_alloc( req->block, write_cb, req ); + break; + + case ALLOC_DATA_L1f: + + DPRINTF("ALLOC_DATA_L1f\n"); + addr = IO_ADDR(r); + a = writable(addr); + req->radix[L3][L3_IDX(req->vaddr)] = a; bi = (struct block_info *) &req->radix[L3][L3_IDX(req->vaddr)+1]; req->bi.unused = 107; *bi = req->bi; - req->state = ALLOC_L3_L1f; - block_alloc( (char*)req->radix[L3], write_cb, req ); - break; - - case ALLOC_L3_L1f: - - DPRINTF("ALLOC_L3_L1f\n"); - addr = IO_ADDR(r); - a = writable(addr); - req->radix[L2][L2_IDX(req->vaddr)] = a; - req->state = ALLOC_L2_L1f; - block_alloc( (char*)req->radix[L2], write_cb, req ); - break; - - case ALLOC_L2_L1f: - - DPRINTF("ALLOC_L2_L1f\n"); - addr = IO_ADDR(r); - a = writable(addr); - req->radix[L1][L1_IDX(req->vaddr)] = a; - req->state = WRITE_L1_L1f; - block_write(req->radix_addr[L1], (char*)req->radix[L1], write_cb, req); - break; - + req->state = ALLOC_L3_L1f; + block_alloc( (char*)req->radix[L3], write_cb, req ); + break; + + case ALLOC_L3_L1f: + + DPRINTF("ALLOC_L3_L1f\n"); + addr = IO_ADDR(r); + a = writable(addr); + req->radix[L2][L2_IDX(req->vaddr)] = a; + req->state = ALLOC_L2_L1f; + block_alloc( (char*)req->radix[L2], write_cb, req ); + break; + + case ALLOC_L2_L1f: + + DPRINTF("ALLOC_L2_L1f\n"); + addr = IO_ADDR(r); + a = writable(addr); + 
req->radix[L1][L1_IDX(req->vaddr)] = a; + req->state = WRITE_L1_L1f; + block_write(req->radix_addr[L1], (char*)req->radix[L1], write_cb, req); + break; + case WRITE_L3: - case WRITE_L3_L3z: - case WRITE_L3_L3f: - case WRITE_L2_L2z: - case WRITE_L2_L2f: - case WRITE_L1_L1z: - case WRITE_L1_L1f: - { - int i; - DPRINTF("DONE\n"); - /* free any saved node vals. */ - for (i=0; i<3; i++) + case WRITE_L3_L3z: + case WRITE_L3_L3f: + case WRITE_L2_L2z: + case WRITE_L2_L2f: + case WRITE_L1_L1z: + case WRITE_L1_L1f: + { + int i; + DPRINTF("DONE\n"); + /* free any saved node vals. */ + for (i=0; i<3; i++) if (req->radix[i] != 0) free(req->radix[i]); - req->retval = r; - req->state = WRITE_UNLOCKED; - block_wunlock(req->lock, L1_IDX(req->vaddr), write_cb, req); - break; - } - case WRITE_UNLOCKED: - { + req->retval = r; + req->state = WRITE_UNLOCKED; + block_wunlock(req->lock, L1_IDX(req->vaddr), write_cb, req); + break; + } + case WRITE_UNLOCKED: + { struct io_ret r; io_cb_t cb; - DPRINTF("WRITE_UNLOCKED!\n"); - req_param = req->param; - r = req->retval; - cb = req->cb; + DPRINTF("WRITE_UNLOCKED!\n"); + req_param = req->param; + r = req->retval; + cb = req->cb; free(req); - cb(r, req_param); - break; - } - - default: - DPRINTF("*** Write: Bad state! (%d) ***\n", req->state); - goto fail; - } - - return; - - fail: + cb(r, req_param); + break; + } + + default: + DPRINTF("*** Write: Bad state! (%d) ***\n", req->state); + goto fail; + } + + return; + + fail: { struct io_ret r; io_cb_t cb; int i; DPRINTF("asyn_write had a read error mid-way.\n"); - req_param = req->param; - cb = req->cb; - r.type = IO_INT_T; - r.u.i = -1; + req_param = req->param; + cb = req->cb; + r.type = IO_INT_T; + r.u.i = -1; /* free any saved node vals. */ for (i=0; i<3; i++) if (req->radix[i] != 0) free(req->radix[i]); - free(req); - cb(r, req_param); - } -} - + free(req); + cb(r, req_param); + } +} + char *vdi_read_s(vdi_t *vdi, u64 vaddr) { pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER; _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
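
The queueing scheme in block-async.c above — a circular producer-consumer ring with free-running counters, a pthread mutex/condition pair, and a pool of worker threads — is self-contained enough to sketch on its own. The following is a minimal illustrative version, not the blktap code: it collapses the io_prod/io_cons/io_free triple and the indirection list (which let blktap retire requests out of order) into a single ring, and the request struct is a stand-in.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define RING_SIZE 16                     /* must be a power of 2 */
#define RING_MASK(_x) ((_x) & (RING_SIZE - 1))

struct req { int op; };

static struct req ring[RING_SIZE];
static unsigned long prod = 0, cons = 0; /* free-running counters */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* Producer: queue a request and wake one sleeping worker. */
static void submit(int op)
{
    pthread_mutex_lock(&lock);
    assert(prod - cons < RING_SIZE);     /* a full ring would be a bug */
    ring[RING_MASK(prod++)].op = op;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
}

/* Consumer: sleep on the condition variable until work arrives. */
static void *worker(void *param)
{
    for (;;) {
        struct req r;
        pthread_mutex_lock(&lock);
        while (prod == cons)
            pthread_cond_wait(&cond, &lock);
        r = ring[RING_MASK(cons++)];
        pthread_mutex_unlock(&lock);
        printf("worker %ld: op %d\n", (long)param, r.op);
    }
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, (void *)0L);
    submit(1);
    submit(2);
    sleep(1);                            /* let the toy worker drain */
    return 0;
}

As in block-async.c, the counters are never reduced modulo the ring size except at indexing time, so prod - cons is the number of outstanding entries even across wraparound.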
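
Similarly, the address arithmetic that requests-async.c relies on — splitting a virtual block address into three 9-bit radix indices after shifting it left, so that every second bottom-level slot is free for the crc32/metadata pair — can be checked standalone. The index macros and mask are copied from the patch; the driver program around them is illustrative only:

#include <stdint.h>
#include <stdio.h>

#define VADDR_MASK 0x0000000003ffffffULL /* 26-bit virtual addresses */
#define L1_IDX(_a) (((_a) & 0x0000000007fc0000ULL) >> 18)
#define L2_IDX(_a) (((_a) & 0x000000000003fe00ULL) >> 9)
#define L3_IDX(_a) (((_a) & 0x00000000000001ffULL))

int main(void)
{
    uint64_t vaddr = 0x123456ULL;        /* any address under VADDR_MASK */
    if ((vaddr & VADDR_MASK) != vaddr)
        return 1;                        /* vdi_read/vdi_write return ERR_BAD_VADDR */
    /* Shift so data lands in even L3 slots; slot + 1 holds the
     * struct block_info (crc32 plus a spare word) for that block. */
    uint64_t a = vaddr << 1;
    printf("L1=%llu L2=%llu L3=%llu meta=%llu\n",
           (unsigned long long)L1_IDX(a),
           (unsigned long long)L2_IDX(a),
           (unsigned long long)L3_IDX(a),
           (unsigned long long)(L3_IDX(a) + 1));
    return 0;
}

Because the indexed address is always a value shifted left by one, L3_IDX(a) is always even, which is what keeps the data slots and their adjacent metadata slots from colliding.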