diff -r 884447d31e86 -r 47edf571a62d linux-2.6-xen-sparse/drivers/xen/xenidc/Makefile --- a/linux-2.6-xen-sparse/drivers/xen/xenidc/Makefile Sun Nov 20 17:43:54 2005 +++ b/linux-2.6-xen-sparse/drivers/xen/xenidc/Makefile Sun Nov 20 17:44:51 2005 @@ -10,3 +10,4 @@ xenidc-objs += xenidc_wrapping.o xenidc-objs += xenidc_grant_table.o xenidc-objs += xenidc_vaddress.o +xenidc-objs += xenidc_gnttab_channel.o diff -r 884447d31e86 -r 47edf571a62d linux-2.6-xen-sparse/drivers/xen/xenidc/xenidc_gnttab_channel.c --- /dev/null Sun Nov 20 17:43:54 2005 +++ b/linux-2.6-xen-sparse/drivers/xen/xenidc/xenidc_gnttab_channel.c Sun Nov 20 17:44:51 2005 @@ -0,0 +1,1639 @@ +/*****************************************************************************/ +/* This is a class which implements a grant-tables based inter-domain */ +/* message channel. The implementation of the bring-up and tear-down */ +/* handshaking is left to a derived class. */ +/* This class is used by xenidc_xbgt_channel (which implements bring-up and */ +/* teardown using xenbus) which is in turn used to implement the */ +/* xenidc_endpoint class. */ +/* */ +/* Copyright (c) 2005 Harry Butterworth IBM Corporation */ +/* */ +/* This program is free software; you can redistribute it and/or modify it */ +/* under the terms of the GNU General Public License as published by the */ +/* Free Software Foundation; either version 2 of the License, or (at your */ +/* option) any later version. */ +/* */ +/* This program is distributed in the hope that it will be useful, but */ +/* WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General */ +/* Public License for more details. */ +/* */ +/* You should have received a copy of the GNU General Public License along */ +/* with this program; if not, write to the Free Software Foundation, Inc., */ +/* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* */ +/*****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include "xenidc_trace.h" +#include "xenidc_channel_ring.h" + +static inline void xenidc_gnttab_channel_target_resource_init + (xenidc_gnttab_channel_target_resource * resource, + xenidc_gnttab_channel * channel, xenidc_callback_function * callback) { + trace(); + + xenidc_channel_message_init(&resource->message, callback); + + resource->channel = channel; +} + +static inline struct list_head *xenidc_gnttab_channel_target_resource_to_link + (xenidc_gnttab_channel_target_resource * resource) { + trace(); + + return xenidc_channel_message_to_link(&resource->message); +} + +static inline xenidc_channel_message + *xenidc_gnttab_channel_target_resource_to_message + (xenidc_gnttab_channel_target_resource * resource) { + trace(); + + return &resource->message; +} + +static inline xenidc_gnttab_channel_target_resource + *xenidc_gnttab_channel_target_resource_callback_to(xenidc_callback * + callback) { + trace(); + + return container_of + (xenidc_channel_message_callback_to(callback), + xenidc_gnttab_channel_target_resource, message); +} + +static inline xenidc_gnttab_channel + *xenidc_gnttab_channel_target_resource_query_channel + (xenidc_gnttab_channel_target_resource * resource) { + trace(); + + return resource->channel; +} + +typedef enum { + xenidc_gnttab_channel_stimulus_c1r, /* phase one connect request */ + xenidc_gnttab_channel_stimulus_c2r, /* phase two connect request */ + xenidc_gnttab_channel_stimulus_mqr, /* 
message queued */ + xenidc_gnttab_channel_stimulus_d2r, /* phase two disconnect request */ + xenidc_gnttab_channel_stimulus_d1r, /* phase one disconnect request */ + xenidc_gnttab_channel_stimulus_sir, /* send interrupt */ + xenidc_gnttab_channel_stimulus_rir, /* recv interrupt */ + xenidc_gnttab_channel_stimulus_c1c, /* phase one connect completed */ + xenidc_gnttab_channel_stimulus_c2s, /* phase two connect successful */ + xenidc_gnttab_channel_stimulus_c2f, /* phase two connect failed */ + xenidc_gnttab_channel_stimulus_ccc, /* connect client completed */ + xenidc_gnttab_channel_stimulus_dcc, /* disconnect client completed */ + xenidc_gnttab_channel_stimulus_d2c, /* phase two disconnect completed */ + xenidc_gnttab_channel_stimulus_d1c, /* phase one disconnect completed */ + xenidc_gnttab_channel_stimulus_ksc, /* kick send ring completed */ + xenidc_gnttab_channel_stimulus_krc, /* kick recv ring completed */ + xenidc_gnttab_channel_stimulus_trc, /* target resource completed */ + xenidc_gnttab_channel_stimulus_tri /* target resources idle */ +} xenidc_gnttab_channel_stimulus; + +static void xenidc_gnttab_channel_handle_stimulus + (xenidc_gnttab_channel * channel, xenidc_gnttab_channel_stimulus stimulus); + +static void xenidc_gnttab_channel_submit_message + (xenidc_channel * base_channel, xenidc_channel_message * message); + +static int xenidc_gnttab_channel_init_or_exit + (xenidc_gnttab_channel * channel, int exit); + +int xenidc_gnttab_channel_init + (xenidc_gnttab_channel * channel, + void (*protocol_error) (xenidc_gnttab_channel * channel) + ) { + trace(); + + xenidc_channel_init + (&channel->channel, xenidc_gnttab_channel_submit_message); + + channel->protocol_error = protocol_error; + + return xenidc_gnttab_channel_init_or_exit(channel, 0); +} + +static void xenidc_gnttab_channel_do_phase_one_connect_1(void *data); + +static void xenidc_gnttab_channel_do_phase_two_connect_1(void *data); + +static void xenidc_gnttab_channel_connect_client_1(void *data); + +static void xenidc_gnttab_channel_disconnect_client_1(void *data); + +static void xenidc_gnttab_channel_disconnect_client_2 + (xenidc_callback * callback); + +static void xenidc_gnttab_channel_do_phase_two_disconnect_1(void *data); + +static void xenidc_gnttab_channel_do_phase_one_disconnect_1(void *data); + +static void xenidc_gnttab_channel_kick_send_ring_1(void *data); + +static void xenidc_gnttab_channel_kick_recv_ring_1(void *data); + +static void xenidc_gnttab_channel_kick_recv_ring_2(xenidc_callback * callback); + +static int xenidc_gnttab_channel_init_or_exit + (xenidc_gnttab_channel * channel, int exit) { + trace(); + + { + int return_value = 0; + + if (exit) { + goto EXIT; + } + + channel->send_irq_context = channel; + channel->recv_irq_context = channel; + + channel->send_ring = (void *)__get_free_page(GFP_KERNEL); + + if (channel->send_ring == NULL) { + trace0("failed to allocate send ring"); + + return_value = -ENOMEM; + + goto EXIT_NO_SEND_RING; + } + + if ((channel->recv_ring_area = alloc_vm_area(PAGE_SIZE)) + == NULL) { + trace0("failed to allocate receive ring area"); + + return_value = -ENOMEM; + + goto EXIT_NO_RING_AREA; + } + + return_value = + gnttab_alloc_grant_references(1, &channel->grant_ref_pool); + + if (return_value != 0) { + trace0("failed to allocate grant reference pool"); + + goto EXIT_NO_GRANT_REF; + } + + spin_lock_init(&channel->lock); + + channel->state = xenidc_gnttab_channel_state_i; + + INIT_LIST_HEAD(&channel->message_list); + + xenidc_work_init + (&channel->do_phase_one_connect_1_work, + 
xenidc_gnttab_channel_do_phase_one_connect_1, channel); + + xenidc_work_init + (&channel->do_phase_two_connect_1_work, + xenidc_gnttab_channel_do_phase_two_connect_1, channel); + + xenidc_work_init + (&channel->connect_client_1_work, + xenidc_gnttab_channel_connect_client_1, channel); + + xenidc_work_init + (&channel->disconnect_client_1_work, + xenidc_gnttab_channel_disconnect_client_1, channel); + + xenidc_callback_init + (&channel->disconnect_client_2_callback, + xenidc_gnttab_channel_disconnect_client_2); + + xenidc_work_init + (&channel->do_phase_two_disconnect_1_work, + xenidc_gnttab_channel_do_phase_two_disconnect_1, channel); + + xenidc_work_init + (&channel->do_phase_one_disconnect_1_work, + xenidc_gnttab_channel_do_phase_one_disconnect_1, channel); + + xenidc_work_init + (&channel->kick_send_ring_1_work, + xenidc_gnttab_channel_kick_send_ring_1, channel); + + xenidc_work_init + (&channel->kick_recv_ring_1_work, + xenidc_gnttab_channel_kick_recv_ring_1, channel); + + { + int i; + + for (i = 0; + i < XENIDC_GNTTAB_CHANNEL_TARGET_RESOURCE_COUNT; + i++) { + xenidc_gnttab_channel_target_resource *resource + = &channel->target_resources[i]; + + xenidc_gnttab_channel_target_resource_init + (resource, + channel, + xenidc_gnttab_channel_kick_recv_ring_2); + + channel->target_resource_free[i] = 1; + } + } + + channel->first_target_resource = 0; + channel->next_target_resource = 0; + + channel->send_ring_kick_out = 0; + channel->recv_ring_kick_out = 0; + + return 0; + + EXIT: + + gnttab_free_grant_references(channel->grant_ref_pool); + + EXIT_NO_GRANT_REF: + + free_vm_area(channel->recv_ring_area); + + EXIT_NO_RING_AREA: + + free_page((unsigned long)channel->send_ring); + + EXIT_NO_SEND_RING: + + return return_value; + } +} + +void xenidc_gnttab_channel_phase_one_connect + (xenidc_gnttab_channel * channel, + xenidc_gnttab_channel_phase_one_connect_request * request) { + trace(); + + { + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + channel->current_callback = &request->callback; + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_c1r); + + spin_unlock_irqrestore(&channel->lock, flags); + } +} + +void xenidc_gnttab_channel_reset_ring(xenidc_gnttab_channel * channel) +{ + trace(); + + channel->recv_ring_offset = 0; + + memset(channel->send_ring, 0, PAGE_SIZE); +} + +void xenidc_gnttab_channel_phase_two_connect + (xenidc_gnttab_channel * channel, + xenidc_gnttab_channel_phase_two_connect_request * request) { + trace(); + + { + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + channel->current_callback = &request->callback; + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_c2r); + + spin_unlock_irqrestore(&channel->lock, flags); + } +} + +static void xenidc_gnttab_channel_submit_message + (xenidc_channel * base_channel, xenidc_channel_message * message) { + trace(); + + /* MUST MAINTAIN RELATIVE REQUEST ORDER ON THE SUBMISSION PATH */ + + { + xenidc_gnttab_channel *channel = + xenidc_gnttab_channel_channel_to(base_channel); + + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + list_add_tail + (xenidc_channel_message_to_link(message), + &channel->message_list); + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_mqr); + + spin_unlock_irqrestore(&channel->lock, flags); + } +} + +void xenidc_gnttab_channel_phase_two_disconnect + (xenidc_gnttab_channel * channel, xenidc_callback * callback) { + trace(); + + { + unsigned long flags; + 
+ spin_lock_irqsave(&channel->lock, flags); + + channel->current_callback = callback; + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_d2r); + + spin_unlock_irqrestore(&channel->lock, flags); + } +} + +void xenidc_gnttab_channel_phase_one_disconnect + (xenidc_gnttab_channel * channel, xenidc_callback * callback) { + trace(); + + { + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + channel->current_callback = callback; + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_d1r); + + spin_unlock_irqrestore(&channel->lock, flags); + } +} + +void xenidc_gnttab_channel_exit(xenidc_gnttab_channel * channel) +{ + trace(); + + (void)xenidc_gnttab_channel_init_or_exit(channel, 1); +} + +static irqreturn_t xenidc_gnttab_channel_send_interrupt + (int irq, void *context, struct pt_regs *ptregs) { + trace(); + + { + xenidc_gnttab_channel *channel = + *(xenidc_gnttab_channel **) context; + + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_sir); + + spin_unlock_irqrestore(&channel->lock, flags); + } + + return IRQ_HANDLED; +} + +static irqreturn_t xenidc_gnttab_channel_recv_interrupt + (int irq, void *context, struct pt_regs *ptregs) { + trace(); + + { + xenidc_gnttab_channel *channel = + *(xenidc_gnttab_channel **) context; + + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_rir); + + spin_unlock_irqrestore(&channel->lock, flags); + } + + return IRQ_HANDLED; +} + +static void xenidc_gnttab_channel_invalid_stimulus + (xenidc_gnttab_channel * channel, xenidc_gnttab_channel_stimulus stimulus); + +static void xenidc_gnttab_channel_do_phase_one_connect + (xenidc_gnttab_channel * channel); + +static void xenidc_gnttab_channel_do_phase_two_connect + (xenidc_gnttab_channel * channel); + +static void xenidc_gnttab_channel_connect_client + (xenidc_gnttab_channel * channel); + +static void xenidc_gnttab_channel_disconnect_client + (xenidc_gnttab_channel * channel); + +static void xenidc_gnttab_channel_do_phase_two_disconnect + (xenidc_gnttab_channel * channel); + +static void xenidc_gnttab_channel_do_phase_one_disconnect + (xenidc_gnttab_channel * channel); + +static void xenidc_gnttab_channel_kick_send_ring + (xenidc_gnttab_channel * channel); + +static void xenidc_gnttab_channel_kick_recv_ring + (xenidc_gnttab_channel * channel); + +static void xenidc_gnttab_channel_complete_current_callback + (xenidc_gnttab_channel * channel); + +static void xenidc_gnttab_channel_fail_current_callback + (xenidc_gnttab_channel * channel); + +static void xenidc_gnttab_channel_fail_out_messages + (xenidc_gnttab_channel * channel); + +static void xenidc_gnttab_channel_test_target_resources + (xenidc_gnttab_channel * channel); + +static void xenidc_gnttab_channel_handle_stimulus + (xenidc_gnttab_channel * channel, xenidc_gnttab_channel_stimulus stimulus) { + trace3 + ("channel %p in state %d received stimulus %d", + channel, channel->state, stimulus); + + switch (channel->state) { + case xenidc_gnttab_channel_state_i: + /* Interface disconnected. */ + /* Client disconnected. */ + /* No messages queued. */ + /* Kick send idle. */ + /* Kick recv idle. */ + /* Target resources idle. 
*/ + switch (stimulus) { + case xenidc_gnttab_channel_stimulus_c1r: + channel->state = xenidc_gnttab_channel_state_i_c1r; + xenidc_gnttab_channel_do_phase_one_connect(channel); + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, + stimulus); + break; + } + break; + case xenidc_gnttab_channel_state_i_c1r: + /* Interface phase one connecting. */ + /* Client disconnected. */ + /* No messages queued. */ + /* Kick send idle. */ + /* Kick recv idle. */ + /* Target resources idle. */ + /* do phase one connect in progress */ + switch (stimulus) { + case xenidc_gnttab_channel_stimulus_sir: + break; + case xenidc_gnttab_channel_stimulus_c1c: + channel->state = xenidc_gnttab_channel_state_i_c1r_c1c; + xenidc_gnttab_channel_complete_current_callback + (channel); + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, + stimulus); + break; + } + break; + case xenidc_gnttab_channel_state_i_c1r_c1c: + /* Interface phase one connected. */ + /* Client disconnected. */ + /* No messages queued. */ + /* Kick send idle. */ + /* Kick recv idle. */ + /* Target resources idle. */ + /* Phase one connected. */ + switch (stimulus) { + case xenidc_gnttab_channel_stimulus_c2r: + channel->state = + xenidc_gnttab_channel_state_i_c1r_c1c_c2r; + xenidc_gnttab_channel_do_phase_two_connect(channel); + break; + case xenidc_gnttab_channel_stimulus_d1r: + channel->state = + xenidc_gnttab_channel_state_i_c1r_c1c_d1r; + xenidc_gnttab_channel_do_phase_one_disconnect(channel); + break; + case xenidc_gnttab_channel_stimulus_sir: + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, + stimulus); + break; + } + break; + case xenidc_gnttab_channel_state_i_c1r_c1c_c2r: + /* Interface phase two connecting. */ + /* Client disconnected. */ + /* No messages queued. */ + /* Kick send idle. */ + /* Kick recv idle. */ + /* Target resources idle. */ + /* do phase two connect in progress */ + switch (stimulus) { + case xenidc_gnttab_channel_stimulus_sir: + case xenidc_gnttab_channel_stimulus_rir: + break; + case xenidc_gnttab_channel_stimulus_c2s: + channel->state = + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s; + xenidc_gnttab_channel_connect_client(channel); + break; + case xenidc_gnttab_channel_stimulus_c2f: + channel->state = xenidc_gnttab_channel_state_i_c1r_c1c; + xenidc_gnttab_channel_fail_current_callback(channel); + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, + stimulus); + break; + } + break; + case xenidc_gnttab_channel_state_i_c1r_c1c_d1r: + /* Interface phase one disconnecting. */ + /* Client disconnected. */ + /* No messages queued. */ + /* Kick send idle. */ + /* Kick recv idle. */ + /* Target resources idle. */ + /* do phase one disconnect in progress */ + switch (stimulus) { + case xenidc_gnttab_channel_stimulus_sir: + break; + case xenidc_gnttab_channel_stimulus_d1c: + channel->state = xenidc_gnttab_channel_state_i; + xenidc_gnttab_channel_complete_current_callback + (channel); + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, + stimulus); + break; + } + break; + case xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s: + /* Interface phase two connecting. */ + /* Client connecting. */ + /* Maybe messages queued. */ + /* Kick send idle. */ + /* Kick recv idle. */ + /* Target resources idle. */ + /* Phase two connected. 
*/ + /* connect client in progress */ + switch (stimulus) { + case xenidc_gnttab_channel_stimulus_mqr: + case xenidc_gnttab_channel_stimulus_sir: + case xenidc_gnttab_channel_stimulus_rir: + break; + case xenidc_gnttab_channel_stimulus_ccc: + channel->state = + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc; + xenidc_gnttab_channel_complete_current_callback + (channel); + xenidc_gnttab_channel_kick_send_ring(channel); + xenidc_gnttab_channel_kick_recv_ring(channel); + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, + stimulus); + break; + } + break; + case xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc: + /* Interface phase two connected. */ + /* Client connected. */ + /* Maybe messages queued. */ + /* Maybe kick send in progress. */ + /* Maybe kick recv in progress. */ + /* Maybe target resources busy. */ + /* Phase two connected. */ + switch (stimulus) { + case xenidc_gnttab_channel_stimulus_mqr: + xenidc_gnttab_channel_kick_send_ring(channel); + break; + case xenidc_gnttab_channel_stimulus_d2r: + channel->state = + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r; + xenidc_gnttab_channel_kick_send_ring(channel); + xenidc_gnttab_channel_kick_recv_ring(channel); + break; + case xenidc_gnttab_channel_stimulus_sir: + xenidc_gnttab_channel_kick_send_ring(channel); + break; + case xenidc_gnttab_channel_stimulus_rir: + xenidc_gnttab_channel_kick_recv_ring(channel); + break; + case xenidc_gnttab_channel_stimulus_ksc: + case xenidc_gnttab_channel_stimulus_krc: + break; + case xenidc_gnttab_channel_stimulus_trc: + case xenidc_gnttab_channel_stimulus_tri: + xenidc_gnttab_channel_kick_recv_ring(channel); + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, + stimulus); + break; + } + break; + case xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r: + /* Interface phase two disconnecting. */ + /* Client connected. */ + /* Maybe messages queued. */ + /* Kick send in progress. */ + /* Kick recv in progress. */ + /* Maybe target resources busy. */ + /* Phase two connected. */ + switch (stimulus) { + case xenidc_gnttab_channel_stimulus_mqr: + case xenidc_gnttab_channel_stimulus_sir: + case xenidc_gnttab_channel_stimulus_rir: + break; + case xenidc_gnttab_channel_stimulus_ksc: + case xenidc_gnttab_channel_stimulus_krc: + channel->state = + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r_ksc; + break; + case xenidc_gnttab_channel_stimulus_trc: + case xenidc_gnttab_channel_stimulus_tri: + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, + stimulus); + break; + } + break; + case xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r_ksc: + /* Interface phase two disconnecting. */ + /* Client connected. */ + /* Maybe messages queued. */ + /* One of kick send /recv in progress. */ + /* Maybe target resources busy. */ + /* Phase two connected. 
*/ + switch (stimulus) { + case xenidc_gnttab_channel_stimulus_mqr: + case xenidc_gnttab_channel_stimulus_sir: + case xenidc_gnttab_channel_stimulus_rir: + break; + case xenidc_gnttab_channel_stimulus_ksc: + case xenidc_gnttab_channel_stimulus_krc: + channel->state = + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc; + xenidc_gnttab_channel_disconnect_client(channel); + xenidc_gnttab_channel_fail_out_messages(channel); + break; + case xenidc_gnttab_channel_stimulus_trc: + case xenidc_gnttab_channel_stimulus_tri: + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, + stimulus); + break; + } + break; + case xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc: + /* Interface phase two disconnecting. */ + /* Client disconnecting. */ + /* No messages queued. */ + /* Kick send /recv idle. */ + /* Maybe target resources busy. */ + /* Phase two connected. */ + switch (stimulus) { + case xenidc_gnttab_channel_stimulus_mqr: + xenidc_gnttab_channel_fail_out_messages(channel); + break; + case xenidc_gnttab_channel_stimulus_sir: + case xenidc_gnttab_channel_stimulus_rir: + break; + case xenidc_gnttab_channel_stimulus_dcc: + channel->state = + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc; + xenidc_gnttab_channel_test_target_resources(channel); + break; + case xenidc_gnttab_channel_stimulus_trc: + case xenidc_gnttab_channel_stimulus_tri: + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, + stimulus); + break; + } + break; + case xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc: + /* Interface phase two disconnecting. */ + /* Client disconnected. */ + /* No messages queued. */ + /* Kick send /recv idle. */ + /* Testing target resources/ target resources busy */ + /* Phase two connected. */ + switch (stimulus) { + case xenidc_gnttab_channel_stimulus_sir: + case xenidc_gnttab_channel_stimulus_rir: + case xenidc_gnttab_channel_stimulus_trc: + break; + case xenidc_gnttab_channel_stimulus_tri: + channel->state = + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc_tri; + xenidc_gnttab_channel_do_phase_two_disconnect(channel); + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, + stimulus); + break; + } + break; + case xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc_tri: + /* Interface phase two disconnecting. */ + /* Client disconnected. */ + /* No messages queued. */ + /* Kick send /recv idle. */ + /* Target resources idle. */ + /* Phase two disconnecting. 
*/ + switch (stimulus) { + case xenidc_gnttab_channel_stimulus_sir: + case xenidc_gnttab_channel_stimulus_rir: + break; + case xenidc_gnttab_channel_stimulus_d2c: + channel->state = xenidc_gnttab_channel_state_i_c1r_c1c; + xenidc_gnttab_channel_complete_current_callback + (channel); + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, + stimulus); + break; + } + break; + default: + xenidc_gnttab_channel_invalid_stimulus(channel, stimulus); + break; + } +} + +static void xenidc_gnttab_channel_invalid_stimulus + (xenidc_gnttab_channel * channel, xenidc_gnttab_channel_stimulus stimulus) { + trace(); + + printk + (KERN_ERR "xenidc: channel %p in state %d" + "received invalid stimulus %d", channel, channel->state, stimulus); +} + +static void xenidc_gnttab_channel_do_phase_one_connect + (xenidc_gnttab_channel * channel) { + trace(); + + (void)xenidc_work_schedule(&channel->do_phase_one_connect_1_work); +} + +static void xenidc_gnttab_channel_do_phase_one_connect_1(void *data) +{ + trace(); + + { + xenidc_gnttab_channel *channel = (xenidc_gnttab_channel *) data; + + xenidc_gnttab_channel_phase_one_connect_request *request = + xenidc_gnttab_channel_phase_one_connect_request_callback_to + (channel->current_callback); + + { + evtchn_op_t op = { + .cmd = EVTCHNOP_alloc_unbound, + .u.alloc_unbound.dom = DOMID_SELF, + .u.alloc_unbound.remote_dom = + request->remote_domain_id + }; + + BUG_ON(HYPERVISOR_event_channel_op(&op) != 0); + + channel->send_event_channel = op.u.alloc_unbound.port; + } + + { + int error = bind_evtchn_to_irqhandler + (channel->send_event_channel, + xenidc_gnttab_channel_send_interrupt, + SA_SAMPLE_RANDOM, + "xenidc", + &channel->send_irq_context); + + BUG_ON(error < 0); + + channel->send_irq = error; + } + + channel->send_ring_ref = + gnttab_claim_grant_reference(&channel->grant_ref_pool); + + channel->remote_domain_id = request->remote_domain_id; + + gnttab_grant_foreign_access_ref(channel->send_ring_ref, request->remote_domain_id, virt_to_mfn(channel->send_ring), 1 /* readonly */ + ); + + request->send_ring_ref = channel->send_ring_ref; + request->send_event_channel = channel->send_event_channel; + + { + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_c1c); + + spin_unlock_irqrestore(&channel->lock, flags); + } + } +} + +static void xenidc_gnttab_channel_do_phase_two_connect + (xenidc_gnttab_channel * channel) { + trace(); + + (void)xenidc_work_schedule(&channel->do_phase_two_connect_1_work); +} + +static void xenidc_gnttab_channel_do_phase_two_connect_1(void *data) +{ + trace(); + + { + xenidc_gnttab_channel *channel = (xenidc_gnttab_channel *) data; + + xenidc_gnttab_channel_phase_two_connect_request *request = + xenidc_gnttab_channel_phase_two_connect_request_callback_to + (channel->current_callback); + + { + struct gnttab_map_grant_ref op = { + .host_addr = + (unsigned long)channel->recv_ring_area-> + addr, + .flags = GNTMAP_host_map | GNTMAP_readonly, + .dom = request->remote_domain_id, + .ref = request->recv_ring_ref + }; + + lock_vm_area(channel->recv_ring_area); + + BUG_ON + (HYPERVISOR_grant_table_op + (GNTTABOP_map_grant_ref, &op, 1)); + + unlock_vm_area(channel->recv_ring_area); + + if (op.handle < 0) { + trace0("failed to map remote page"); + + goto EXIT_NO_MAPPING; + } + + channel->recv_ring_handle = op.handle; + } + + { + evtchn_op_t op = { + .cmd = EVTCHNOP_bind_interdomain, + .u.bind_interdomain.remote_dom = + request->remote_domain_id, + 
.u.bind_interdomain.remote_port = + request->recv_event_channel + }; + + if (HYPERVISOR_event_channel_op(&op) != 0) { + trace0 + ("failed to bind to remote event channel"); + + goto EXIT_NO_BIND; + } + + channel->recv_event_channel = + op.u.bind_interdomain.local_port; + } + + channel->recv_irq = bind_evtchn_to_irqhandler + (channel->recv_event_channel, + xenidc_gnttab_channel_recv_interrupt, + 0, "xenidc", &channel->recv_irq_context); + + if (channel->recv_irq < 0) { + trace0("failed to bind remote irq"); + + goto EXIT_NO_IRQ; + } + + channel->recv_ring_lbr = xenidc_vaddress_create_lbr + (((xenidc_channel_ring_header *) + channel->recv_ring_area->addr) + 1, + PAGE_SIZE - sizeof(xenidc_channel_ring_header) + ); + + { + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_c2s); + + spin_unlock_irqrestore(&channel->lock, flags); + } + + return; + + EXIT_NO_IRQ: + + { + evtchn_op_t op = { + .cmd = EVTCHNOP_close, + .u.close.port = channel->recv_event_channel + }; + + BUG_ON(HYPERVISOR_event_channel_op(&op) != 0); + } + + EXIT_NO_BIND: + + { + struct gnttab_unmap_grant_ref op; + + op.host_addr = + (unsigned long)channel->recv_ring_area->addr; + op.handle = channel->recv_ring_handle; + op.dev_bus_addr = 0; + + lock_vm_area(channel->recv_ring_area); + + BUG_ON + (HYPERVISOR_grant_table_op + (GNTTABOP_unmap_grant_ref, &op, 1) + ); + + unlock_vm_area(channel->recv_ring_area); + } + + EXIT_NO_MAPPING: + + { + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_c2f); + + spin_unlock_irqrestore(&channel->lock, flags); + } + } +} + +static void xenidc_gnttab_channel_connect_client + (xenidc_gnttab_channel * channel) { + trace(); + + (void)xenidc_work_schedule(&channel->connect_client_1_work); +} + +static void xenidc_gnttab_channel_connect_client_1(void *data) +{ + trace(); + + { + xenidc_gnttab_channel *channel = (xenidc_gnttab_channel *) data; + + xenidc_channel_connect(&channel->channel); + + { + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_ccc); + + spin_unlock_irqrestore(&channel->lock, flags); + } + } +} + +static void xenidc_gnttab_channel_disconnect_client + (xenidc_gnttab_channel * channel) { + trace(); + + (void)xenidc_work_schedule(&channel->disconnect_client_1_work); +} + +static void xenidc_gnttab_channel_disconnect_client_1(void *data) +{ + trace(); + + { + xenidc_gnttab_channel *channel = (xenidc_gnttab_channel *) data; + + xenidc_channel_disconnect + (&channel->channel, &channel->disconnect_client_2_callback); + } +} + +static void xenidc_gnttab_channel_disconnect_client_2 + (xenidc_callback * callback) { + trace(); + + { + xenidc_gnttab_channel *channel = container_of + (callback, xenidc_gnttab_channel, + disconnect_client_2_callback); + + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_dcc); + + spin_unlock_irqrestore(&channel->lock, flags); + } +} + +static void xenidc_gnttab_channel_do_phase_two_disconnect + (xenidc_gnttab_channel * channel) { + trace(); + + (void)xenidc_work_schedule(&channel->do_phase_two_disconnect_1_work); +} + +static void xenidc_gnttab_channel_do_phase_two_disconnect_1(void *data) +{ + trace(); + + { + xenidc_gnttab_channel *channel = (xenidc_gnttab_channel 
*) data; + + unbind_from_irqhandler + (channel->recv_irq, &channel->recv_irq_context); + + { + struct gnttab_unmap_grant_ref op; + + op.host_addr = + (unsigned long)channel->recv_ring_area->addr; + op.handle = channel->recv_ring_handle; + op.dev_bus_addr = 0; + + lock_vm_area(channel->recv_ring_area); + + BUG_ON + (HYPERVISOR_grant_table_op + (GNTTABOP_unmap_grant_ref, &op, 1) + ); + + unlock_vm_area(channel->recv_ring_area); + } + + { + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_d2c); + + spin_unlock_irqrestore(&channel->lock, flags); + } + } +} + +static void xenidc_gnttab_channel_do_phase_one_disconnect + (xenidc_gnttab_channel * channel) { + trace(); + + (void)xenidc_work_schedule(&channel->do_phase_one_disconnect_1_work); +} + +static void xenidc_gnttab_channel_do_phase_one_disconnect_1(void *data) +{ + trace(); + + { + xenidc_gnttab_channel *channel = (xenidc_gnttab_channel *) data; + + if (gnttab_end_foreign_access_ref(channel->send_ring_ref, 1)) { + gnttab_release_grant_reference + (&channel->grant_ref_pool, channel->send_ring_ref); + + unbind_from_irqhandler + (channel->send_irq, &channel->send_irq_context); + + { + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + xenidc_gnttab_channel_handle_stimulus + (channel, + xenidc_gnttab_channel_stimulus_d1c); + + spin_unlock_irqrestore(&channel->lock, flags); + } + } else { + printk + (KERN_WARNING + "xenidc_gnttab_channel failed to end foreign access " + "granted to domain id %d. Will retry in 5 seconds.\n", + channel->remote_domain_id); + + init_timer(&channel->timer); + + channel->timer.data = (unsigned long)channel; + channel->timer.expires = jiffies + (5 * HZ); + channel->timer.function = (void (*)(unsigned long)) + xenidc_gnttab_channel_do_phase_one_disconnect; + + add_timer(&channel->timer); + } + } +} + +static void xenidc_gnttab_channel_kick_send_ring + (xenidc_gnttab_channel * channel) { + trace(); + + /* MUST MAINTAIN RELATIVE REQUEST ORDER ON THE SUBMISSION PATH */ + + if (!channel->send_ring_kick_out) { + channel->send_ring_kick_out = 1; + + (void)xenidc_work_schedule(&channel->kick_send_ring_1_work); + } +} + +static void xenidc_gnttab_channel_kick_send_ring_1(void *data) +{ + trace(); + + /* MUST MAINTAIN RELATIVE REQUEST ORDER ON THE SUBMISSION PATH */ + + { + xenidc_gnttab_channel *channel = (xenidc_gnttab_channel *) data; + + /* Create a reference to the send ring buffer. */ + + xenidc_local_buffer_reference ring_lbr = + xenidc_vaddress_create_lbr(((xenidc_channel_ring_header *) + channel->send_ring) + 1, + PAGE_SIZE - + sizeof + (xenidc_channel_ring_header) + ); + + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + for (;;) { + int notify = 0; + + xenidc_local_buffer_reference wrapping_lbr; + + xenidc_channel_message *message; + + /* Ensure we see latest amount of free space granted by the */ + /* other side this time we go around the 'for' loop. */ + + mb(); + + /* Create a reference to the free space in the send ring buffer. */ + /* We use a wrapping reference to the above buffer so that we */ + /* can copy into the buffer and wrap automatically. */ + + /* We mustn't trust the 'other_ring_consumer_offset' so we % it */ + /* to make sure it's in range. */ + /* The send ring is read only so the other side can't corrupt */ + /* our 'this_ring_producer_offset'. 
*/ + + wrapping_lbr = xenidc_wrapping_create_lbr + (&ring_lbr, + ((xenidc_channel_ring_header *) channel-> + send_ring)->this_ring_producer_offset, + ((xenidc_channel_ring_header *) + channel->recv_ring_area->addr) + ->other_ring_consumer_offset % + xenidc_local_buffer_reference_query_byte_count + (&ring_lbr), xenidc_wrapping_client_type_producer); + + while ((!list_empty(&channel->message_list)) + && + (xenidc_local_buffer_reference_query_byte_count + (&wrapping_lbr) + > (sizeof(xenidc_channel_ring_element_header) + + + xenidc_local_buffer_reference_query_byte_count + (& + (message = list_entry + (channel->message_list.next, + xenidc_channel_message, + XENIDC_CHANNEL_MESSAGE_LINK) +) +->message_lbr) + ) + ) + ) { + list_del_init(xenidc_channel_message_to_link + (message)); + + /* send_ring_kick_out is still 1 here so we can release the */ + /* lock without risking a second concurrent invocation of */ + /* this function. */ + + spin_unlock_irqrestore(&channel->lock, flags); + + { + xenidc_channel_ring_element_header + header; + xenidc_local_buffer_reference + header_lbr; + xenidc_concatenate_base base; + xenidc_local_buffer_reference + element_lbr; + + memset(&header, 0, sizeof(header)); + + header.length = + xenidc_local_buffer_reference_query_byte_count + (&message->message_lbr); + + header_lbr = + xenidc_vaddress_create_lbr(&header, + sizeof + (header)); + + element_lbr = + xenidc_concatenate_create_lbr(&base, + &header_lbr, + &message-> + message_lbr); + + xenidc_local_buffer_reference_advance + (&wrapping_lbr, + xenidc_local_buffer_reference_copy + (&wrapping_lbr, &element_lbr) + ); + } + + xenidc_callback_success + (xenidc_channel_message_to_callback + (message)); + + notify = 1; + + spin_lock_irqsave(&channel->lock, flags); + } + + if (notify) { + /* send_ring_kick_out is still 1 here so we can release the */ + /* lock without risking a second concurrent invocation of */ + /* this function. */ + + spin_unlock_irqrestore(&channel->lock, flags); + + wmb(); /* Ensure contents of ring written before offset. */ + + ((xenidc_channel_ring_header *) channel-> + send_ring)->this_ring_producer_offset = +xenidc_local_buffer_reference_query_byte_offset(&wrapping_lbr); + + notify_remote_via_irq(channel->send_irq); + + spin_lock_irqsave(&channel->lock, flags); + } else { + /* notify is 0 so we haven't dropped the lock since */ + /* performing a mb() and checking to see whether we can make */ + /* progress. We can't. It's safe to exit. */ + + channel->send_ring_kick_out = 0; + + xenidc_gnttab_channel_handle_stimulus + (channel, + xenidc_gnttab_channel_stimulus_ksc); + + break; + } + } + + spin_unlock_irqrestore(&channel->lock, flags); + } +} + +static void xenidc_gnttab_channel_kick_recv_ring + (xenidc_gnttab_channel * channel) { + trace(); + + /* MUST MAINTAIN RELATIVE REQUEST ORDER ON THE SUBMISSION PATH */ + + if (!channel->recv_ring_kick_out) { + channel->recv_ring_kick_out = 1; + + (void)xenidc_work_schedule(&channel->kick_recv_ring_1_work); + } +} + +static void xenidc_gnttab_channel_kick_recv_ring_1(void *data) +{ + trace(); + + /* MUST MAINTAIN RELATIVE REQUEST ORDER ON THE SUBMISSION PATH */ + + { + xenidc_gnttab_channel *channel = (xenidc_gnttab_channel *) data; + + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + for (;;) { + int progress = 0; + int error = 0; + + /* Create a reference to the elements in the recv ring buffer. */ + /* We use a wrapping reference to the buffer so that we can copy */ + /* out of the buffer and wrap automatically. 
*/ + + xenidc_local_buffer_reference wrapping_lbr; + + /* Ensure we see the latest amount of content in the ring this */ + /* time we go around the 'for' loop. */ + + mb(); + + /* Create a reference to the used space in the recv ring buffer. */ + /* We use a wrapping reference to the above buffer so that we */ + /* can copy out of the buffer and wrap automatically. */ + + /* We mustn't trust the 'this_ring_producer_offset' so we % it */ + /* to make sure it's in range. */ + + wrapping_lbr = xenidc_wrapping_create_lbr + (&channel->recv_ring_lbr, + channel->recv_ring_offset, + ((xenidc_channel_ring_header *) + channel->recv_ring_area->addr) + ->this_ring_producer_offset % + xenidc_local_buffer_reference_query_byte_count + (&channel->recv_ring_lbr), + xenidc_wrapping_client_type_consumer); + + rmb(); /* Ensure we see latest data in wrapping buffer. */ + + while + (channel-> + target_resource_free[channel->next_target_resource] + && + (xenidc_local_buffer_reference_query_byte_count + (&wrapping_lbr) + != 0) + ) { + xenidc_local_buffer_reference element = + wrapping_lbr; + + xenidc_gnttab_channel_target_resource *resource + = + &channel->target_resources[channel-> + next_target_resource]; + + { + xenidc_channel_ring_element_header + header; + + if ((xenidc_local_buffer_reference_copy_out(&wrapping_lbr, &header, sizeof(header)) + == sizeof(header) + ) + && + (xenidc_local_buffer_reference_subrange + (&element, sizeof(header), + header.length) + == header.length) + ) { + xenidc_local_buffer_reference_advance + (&wrapping_lbr, + sizeof(header) + + header.length); + + channel->target_resource_free + [channel-> + next_target_resource] = 0; + + channel->target_resource_offset + [channel-> + next_target_resource] = + xenidc_local_buffer_reference_query_byte_offset + (&wrapping_lbr); + if (++channel-> + next_target_resource == + XENIDC_GNTTAB_CHANNEL_TARGET_RESOURCE_COUNT) + { + channel-> + next_target_resource + = 0; + } + } else { + error = 1; + + break; + } + } + + spin_unlock_irqrestore(&channel->lock, flags); + + { + xenidc_channel_message *message = + xenidc_gnttab_channel_target_resource_to_message + (resource); + + xenidc_channel_message_set_message_lbr + (message, element); + + xenidc_channel_handle_message + (&channel->channel, message); + + progress = 1; + } + + spin_lock_irqsave(&channel->lock, flags); + } + + channel->recv_ring_offset = + xenidc_local_buffer_reference_query_byte_offset + (&wrapping_lbr); + + if (error) { + spin_unlock_irqrestore(&channel->lock, flags); + + channel->protocol_error(channel); + + spin_lock_irqsave(&channel->lock, flags); + } + + if (error || (!progress)) { + /* We have EITHER got here from testing the ring without */ + /* dropping the lock so we WON'T DROP any kick requests OR */ + /* there was an error so we DON'T CARE if we drop a kick */ + /* request. 
*/ + + channel->recv_ring_kick_out = 0; + + xenidc_gnttab_channel_handle_stimulus + (channel, + xenidc_gnttab_channel_stimulus_krc); + + break; + } + } + + spin_unlock_irqrestore(&channel->lock, flags); + } +} + +static void xenidc_gnttab_channel_kick_recv_ring_2(xenidc_callback * callback) { + trace(); + + { + xenidc_gnttab_channel_target_resource *resource = + xenidc_gnttab_channel_target_resource_callback_to(callback); + + xenidc_gnttab_channel *channel = + xenidc_gnttab_channel_target_resource_query_channel + (resource); + + int current_target_resource = ((((unsigned long)resource) + - + ((unsigned long)&channel-> + target_resources[0]) + ) + / + sizeof + (xenidc_gnttab_channel_target_resource) + ); + + if (xenidc_callback_query_error(callback) != + XENIDC_ERROR_SUCCESS) { + channel->protocol_error(channel); + } + + { + unsigned long flags; + + spin_lock_irqsave(&channel->lock, flags); + + channel->target_resource_free[current_target_resource] = + 1; + + if (current_target_resource == + channel->first_target_resource) { + u16 offset; + + do { + offset = channel->target_resource_offset + [channel->first_target_resource]; + + if (++channel->first_target_resource + == + XENIDC_GNTTAB_CHANNEL_TARGET_RESOURCE_COUNT) + { + channel->first_target_resource = + 0; + } + } + while + (channel->target_resource_free + [channel->first_target_resource] + && + (channel->first_target_resource + != channel->next_target_resource) + ); + + mb(); /* Ensure reads complete before we free space. */ + + ((xenidc_channel_ring_header *) channel-> + send_ring)->other_ring_consumer_offset = +offset; + + notify_remote_via_irq(channel->recv_irq); + + if (!channel->target_resource_free + [channel->first_target_resource] + ) { + xenidc_gnttab_channel_handle_stimulus + (channel, + xenidc_gnttab_channel_stimulus_trc); + } else { + xenidc_gnttab_channel_handle_stimulus + (channel, + xenidc_gnttab_channel_stimulus_tri); + } + } + + spin_unlock_irqrestore(&channel->lock, flags); + } + } +} + +static void xenidc_gnttab_channel_complete_current_callback + (xenidc_gnttab_channel * channel) { + trace(); + + xenidc_callback_success(channel->current_callback); +} + +static void xenidc_gnttab_channel_fail_current_callback + (xenidc_gnttab_channel * channel) { + trace(); + + xenidc_callback_complete + (channel->current_callback, XENIDC_ERROR_FAILURE); +} + +static void xenidc_gnttab_channel_fail_out_messages + (xenidc_gnttab_channel * channel) { + trace(); + + while (!list_empty(&channel->message_list)) { + xenidc_channel_message *message = list_entry + (channel->message_list.next, + xenidc_channel_message, + XENIDC_CHANNEL_MESSAGE_LINK); + + list_del_init(xenidc_channel_message_to_link(message)); + + xenidc_callback_success + (xenidc_channel_message_to_callback(message)); + } +} + +static void xenidc_gnttab_channel_test_target_resources + (xenidc_gnttab_channel * channel) { + trace(); + + if (channel->target_resource_free[channel->first_target_resource]) { + xenidc_gnttab_channel_handle_stimulus + (channel, xenidc_gnttab_channel_stimulus_tri); + } +} diff -r 884447d31e86 -r 47edf571a62d linux-2.6-xen-sparse/drivers/xen/xenidc/xenidc_gnttab_channel_enumeration.dot --- /dev/null Sun Nov 20 17:43:54 2005 +++ b/linux-2.6-xen-sparse/drivers/xen/xenidc/xenidc_gnttab_channel_enumeration.dot Sun Nov 20 17:44:51 2005 @@ -0,0 +1,55 @@ +digraph enumeration { +size="7,7" + +i[style=filled,fillcolor=green] +i->i_c1r[label="c1r\ndo_phase_one_connect"] + +i_c1r[style=filled,fillcolor=green] +i_c1r->i_c1r[label="sir"] 
+i_c1r->i_c1r_c1c[label="c1c\ncomplete_current_callback"] + +i_c1r_c1c[style=filled,fillcolor=green] +i_c1r_c1c->i_c1r_c1c_c2r[label="c2r\ndo_phase_two_connect"] +i_c1r_c1c->i_c1r_c1c_d1r[label="d1r\ndo_phase_one_disconnect"] +i_c1r_c1c->i_c1r_c1c[label="sir"] + +i_c1r_c1c_c2r[style=filled,fillcolor=green] +i_c1r_c1c_c2r->i_c1r_c1c_c2r[label="sir/rir"] +i_c1r_c1c_c2r->i_c1r_c1c_c2r_c2s[label="c2s\nconnect_client"] +i_c1r_c1c_c2r->i_c1r_c1c[label="c2f\nfail_current_callback"] + +i_c1r_c1c_d1r[style=filled,fillcolor=orange] +i_c1r_c1c_d1r->i_c1r_c1c_d1r[label="sir"] +i_c1r_c1c_d1r->i[label="d1c\ncomplete_current_callback"] + +i_c1r_c1c_c2r_c2s[style=filled,fillcolor=green] +i_c1r_c1c_c2r_c2s->i_c1r_c1c_c2r_c2s[label="mqr/sir/rir"] +i_c1r_c1c_c2r_c2s->i_c1r_c1c_c2r_c2s_ccc[label="ccc\ncomplete_current_callback\nkick_send_ring\nkick_recv_ring"] + +i_c1r_c1c_c2r_c2s_ccc[style=filled,fillcolor=green] +i_c1r_c1c_c2r_c2s_ccc->i_c1r_c1c_c2r_c2s_ccc[label="mqr/sir\nkick_send_ring"] +i_c1r_c1c_c2r_c2s_ccc->i_c1r_c1c_c2r_c2s_ccc[label="rir/trc/tri\nkick_recv_ring"] +i_c1r_c1c_c2r_c2s_ccc->i_c1r_c1c_c2r_c2s_ccc[label="ksc/krc/trb"] +i_c1r_c1c_c2r_c2s_ccc->i_c1r_c1c_c2r_c2s_ccc_d2r[label="d2r\nkick_send_ring\nkick_recv_ring"] + +i_c1r_c1c_c2r_c2s_ccc_d2r[style=filled,fillcolor=orange] +i_c1r_c1c_c2r_c2s_ccc_d2r->i_c1r_c1c_c2r_c2s_ccc_d2r[label="mqr/sir/rir/trb/trc/tri"] +i_c1r_c1c_c2r_c2s_ccc_d2r->i_c1r_c1c_c2r_c2s_ccc_d2r_ksc[label="ksc/krc"] + +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc[style=filled,fillcolor=orange] +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc->i_c1r_c1c_c2r_c2s_ccc_d2r_ksc[label="mqr/sir/rir/trb/trc/tri"] +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc->i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc[label="ksc/krc\ndisconnect_client\nfail_out_messages"] + +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc[style=filled,fillcolor=orange] +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc->i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc[label="mqr\nfail_out_messages"] +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc->i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc[label="sir/rir/trc/tri"] +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc->i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc[label="dcc\ntest_target_resources"] + +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc[style=filled,fillcolor=orange] +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc->i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc[label="sir/rir/trc"] +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc->i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc_tri[label="tri\nphase_two_disconnect"] + +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc_tri[style=filled,fillcolor=orange] +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc_tri->i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc_tri[label="sir/rir"] +i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc_tri->i_c1r_c1c[label="d2c\ncomplete_current_callback"] +} diff -r 884447d31e86 -r 47edf571a62d linux-2.6-xen-sparse/include/asm-xen/xenidc_gnttab_channel.h --- /dev/null Sun Nov 20 17:43:54 2005 +++ b/linux-2.6-xen-sparse/include/asm-xen/xenidc_gnttab_channel.h Sun Nov 20 17:44:51 2005 @@ -0,0 +1,220 @@ +/*****************************************************************************/ +/* This is a class which implements a grant-tables based inter-domain */ +/* message channel. The implementation of the bring-up and tear-down */ +/* handshaking is left to a derived class. */ +/* This class is used by xenidc_xbgt_channel (which implements bring-up and */ +/* teardown using xenbus) which is in turn used to implement the */ +/* xenidc_endpoint class. 
*/ +/* */ +/* Copyright (c) 2005 Harry Butterworth IBM Corporation */ +/* */ +/* This program is free software; you can redistribute it and/or modify it */ +/* under the terms of the GNU General Public License as published by the */ +/* Free Software Foundation; either version 2 of the License, or (at your */ +/* option) any later version. */ +/* */ +/* This program is distributed in the hope that it will be useful, but */ +/* WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General */ +/* Public License for more details. */ +/* */ +/* You should have received a copy of the GNU General Public License along */ +/* with this program; if not, write to the Free Software Foundation, Inc., */ +/* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* */ +/*****************************************************************************/ + +#ifndef _XENIDC_GNTTAB_CHANNEL_H +#define _XENIDC_GNTTAB_CHANNEL_H + +#include +#include +#include + +typedef struct xenidc_gnttab_channel_struct xenidc_gnttab_channel; + +typedef struct xenidc_gnttab_channel_target_resource_struct + xenidc_gnttab_channel_target_resource; + +struct xenidc_gnttab_channel_target_resource_struct { + xenidc_channel_message message; + xenidc_gnttab_channel *channel; +}; + +#define XENIDC_GNTTAB_CHANNEL_TARGET_RESOURCE_LINK \ +message.XENIDC_CHANNEL_MESSAGE_LINK + +typedef enum { + xenidc_gnttab_channel_state_i, + xenidc_gnttab_channel_state_i_c1r, + xenidc_gnttab_channel_state_i_c1r_c1c, + xenidc_gnttab_channel_state_i_c1r_c1c_c2r, + xenidc_gnttab_channel_state_i_c1r_c1c_d1r, + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s, + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc, + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r, + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r_ksc, + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc, + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc, + xenidc_gnttab_channel_state_i_c1r_c1c_c2r_c2s_ccc_d2r_ksc_krc_dcc_tri +} xenidc_gnttab_channel_state; + +#define XENIDC_GNTTAB_CHANNEL_TARGET_RESOURCE_COUNT 16 + +struct xenidc_gnttab_channel_struct { + xenidc_channel channel; + void (*protocol_error) (xenidc_gnttab_channel * channel); + + xenidc_gnttab_channel *send_irq_context; + xenidc_gnttab_channel *recv_irq_context; + + void *send_ring; + struct vm_struct *recv_ring_area; + grant_ref_t grant_ref_pool; + + spinlock_t lock; + + xenidc_gnttab_channel_state state; + + struct list_head message_list; + + xenidc_work do_phase_one_connect_1_work; + xenidc_work do_phase_two_connect_1_work; + xenidc_work connect_client_1_work; + xenidc_work disconnect_client_1_work; + xenidc_callback disconnect_client_2_callback; + xenidc_work do_phase_two_disconnect_1_work; + xenidc_work do_phase_one_disconnect_1_work; + xenidc_work kick_send_ring_1_work; + xenidc_work kick_recv_ring_1_work; + + xenidc_gnttab_channel_target_resource target_resources + [XENIDC_GNTTAB_CHANNEL_TARGET_RESOURCE_COUNT]; + u8 target_resource_free[XENIDC_GNTTAB_CHANNEL_TARGET_RESOURCE_COUNT]; + u16 target_resource_offset[XENIDC_GNTTAB_CHANNEL_TARGET_RESOURCE_COUNT]; + + int first_target_resource; + int next_target_resource; + + int send_ring_kick_out:1; + int recv_ring_kick_out:1; + + xenidc_callback *current_callback; + + domid_t remote_domain_id; + struct timer_list timer; + + unsigned int send_event_channel; + grant_ref_t send_ring_ref; + int send_irq; + + unsigned int recv_event_channel; + u16 recv_ring_handle; + 
xenidc_local_buffer_reference recv_ring_lbr; + int recv_irq; + u16 recv_ring_offset; +}; + +/* xenidc_gnttab_channel implements the xenidc_channel interface defined in */ +/* xenidc_channel.h. */ + +static inline xenidc_channel *xenidc_gnttab_channel_to_channel + (xenidc_gnttab_channel * channel) { + return &channel->channel; +} + +/* Cast from base class. */ + +static inline xenidc_gnttab_channel *xenidc_gnttab_channel_channel_to + (xenidc_channel * channel) { + return container_of(channel, xenidc_gnttab_channel, channel); +} + +/* Called by derived class. The derived class provides a function for the */ +/* channel to call when it detects a protocol error. This could either */ +/* disconnect the channel or try to reset it. */ + +extern int xenidc_gnttab_channel_init + (xenidc_gnttab_channel * channel, + void (*protocol_error) (xenidc_gnttab_channel * channel) + ); + +/* Connection is done in two phases. In the first phase the */ +/* xenidc_gnttab_channel makes the local resources ready for use by the */ +/* other side and provides information about them to the derived class for */ +/* communication to the other side. In the second phase the derived class */ +/* passes this information communicated from the other side and the */ +/* connection process is completed. */ + +/* xenidc_gnttab_channel_phase_one_connect should never fail because we are */ +/* supposed to have reserved all required resources during initialisation. */ +/* Called by derived class. */ + +typedef struct xenidc_gnttab_channel_phase_one_connect_request_struct + xenidc_gnttab_channel_phase_one_connect_request; + +struct xenidc_gnttab_channel_phase_one_connect_request_struct { + xenidc_callback callback; + domid_t remote_domain_id; /* IN */ + grant_ref_t send_ring_ref; /* OUT */ + unsigned int send_event_channel; /* OUT */ +}; + +static inline xenidc_gnttab_channel_phase_one_connect_request + *xenidc_gnttab_channel_phase_one_connect_request_callback_to(xenidc_callback + * callback) { + return container_of(callback, + xenidc_gnttab_channel_phase_one_connect_request, + callback); +} + +extern void xenidc_gnttab_channel_phase_one_connect + (xenidc_gnttab_channel * channel, + xenidc_gnttab_channel_phase_one_connect_request * request); + +/* Called by derived class whilst phase one connected to reset the ring */ +/* before (re)establishing the connection with the other side. */ + +extern void xenidc_gnttab_channel_reset_ring(xenidc_gnttab_channel * channel); + +/* xenidc_gnttab_channel_phase_two_connect should only fail if the remote */ +/* domain provided incorrect parameters. */ +/* Called by derived class. */ + +typedef struct xenidc_gnttab_channel_phase_two_connect_request_struct + xenidc_gnttab_channel_phase_two_connect_request; + +struct xenidc_gnttab_channel_phase_two_connect_request_struct { + xenidc_callback callback; + domid_t remote_domain_id; /* IN */ + grant_ref_t recv_ring_ref; /* IN */ + unsigned int recv_event_channel; /* IN */ +}; + +static inline xenidc_gnttab_channel_phase_two_connect_request + *xenidc_gnttab_channel_phase_two_connect_request_callback_to(xenidc_callback + * callback) { + return container_of(callback, + xenidc_gnttab_channel_phase_two_connect_request, + callback); +} + +extern void xenidc_gnttab_channel_phase_two_connect + (xenidc_gnttab_channel * channel, + xenidc_gnttab_channel_phase_two_connect_request * request); + +/* Called by derived class. 
*/ + +extern void xenidc_gnttab_channel_phase_two_disconnect + (xenidc_gnttab_channel * channel, xenidc_callback * callback); + +/* Called by derived class. */ + +extern void xenidc_gnttab_channel_phase_one_disconnect + (xenidc_gnttab_channel * channel, xenidc_callback * callback); + +/* Called by derived class. */ + +extern void xenidc_gnttab_channel_exit(xenidc_gnttab_channel * channel); + +#endif
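
For reviewers, a rough sketch of how a derived class (such as the xenbus-based xenidc_xbgt_channel referred to in the header comment) might drive the two-phase connect handshake declared above. It uses only functions and request structures declared in xenidc_gnttab_channel.h; the xenbus exchange of the ring reference and event channel with the remote domain is elided, and the xenidc_callback_init() signature is inferred from its use elsewhere in this patch, so treat this as an illustration rather than part of the submission.

#include <asm-xen/xenidc_gnttab_channel.h>

/* Sketch only: "my_channel", "my_connect" and friends are hypothetical. */

static xenidc_gnttab_channel my_channel;

static xenidc_gnttab_channel_phase_one_connect_request p1_request;
static xenidc_gnttab_channel_phase_two_connect_request p2_request;

static void my_protocol_error(xenidc_gnttab_channel * channel)
{
	/* Either reset the ring and reconnect, or tear the channel down. */
}

static void my_phase_two_done(xenidc_callback * callback)
{
	/* Phase two can fail if the remote parameters were wrong, so check */
	/* xenidc_callback_query_error() here; on XENIDC_ERROR_SUCCESS the */
	/* channel is connected and messages may be submitted through the */
	/* xenidc_channel base class. */
}

static void my_phase_one_done(xenidc_callback * callback)
{
	/* p1_request.send_ring_ref and p1_request.send_event_channel are */
	/* now valid; publish them to the remote domain (e.g. via xenbus) */
	/* and read back the remote domain's ring ref and event channel. */

	xenidc_gnttab_channel_reset_ring(&my_channel);

	p2_request.remote_domain_id = p1_request.remote_domain_id;
	p2_request.recv_ring_ref = 0;	/* value read from the remote domain */
	p2_request.recv_event_channel = 0;	/* likewise */

	xenidc_callback_init(&p2_request.callback, my_phase_two_done);

	xenidc_gnttab_channel_phase_two_connect(&my_channel, &p2_request);
}

static int my_connect(domid_t remote_domain_id)
{
	int error = xenidc_gnttab_channel_init(&my_channel, my_protocol_error);

	if (error != 0)
		return error;

	p1_request.remote_domain_id = remote_domain_id;

	xenidc_callback_init(&p1_request.callback, my_phase_one_done);

	xenidc_gnttab_channel_phase_one_connect(&my_channel, &p1_request);

	return 0;
}

Both connect phases complete asynchronously through the request callbacks, matching the c1c/c2s stimuli in the state machine above; disconnection follows the same pattern with xenidc_gnttab_channel_phase_two_disconnect() and then xenidc_gnttab_channel_phase_one_disconnect().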
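
The ring layout itself comes from xenidc_channel_ring.h, which is not part of this patch. From the way xenidc_gnttab_channel_kick_send_ring_1() and xenidc_gnttab_channel_kick_recv_ring_1() use it, each ring page appears to start with a small header carrying the local producer offset and the consumer offset for the peer's ring, followed by the message area in which every message is preceded by a xenidc_channel_ring_element_header giving its length. A sketch of that inferred layout (field names come from the patch, the widths are a guess):

/* Inferred from usage in this patch; the real definitions live in */
/* xenidc_channel_ring.h and may differ. */

typedef struct {
	/* Byte offset of the next free byte in this (send) ring; only the */
	/* producing side writes it, the peer only reads it. */
	u32 this_ring_producer_offset;

	/* Byte offset up to which this side has consumed the peer's ring; */
	/* advancing it is what hands space back to the peer's producer. */
	u32 other_ring_consumer_offset;
} xenidc_channel_ring_header_sketch;

typedef struct {
	/* Length in bytes of the message payload that follows. */
	u32 length;
} xenidc_channel_ring_element_header_sketch;

The barrier discipline visible in the code follows the usual producer/consumer pattern: the sender copies the element into the ring, issues wmb(), then publishes this_ring_producer_offset and notifies via the event channel; the receiver issues mb() before sampling the offsets, takes them modulo the ring size because the peer's values cannot be trusted, and only advances other_ring_consumer_offset (freeing space back to the sender) once the corresponding target resource completes.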