
Re: [PATCH] 9pfs/xen: Fix segfault on shutdown



On Tue, 2 May 2023, Jason Andryuk wrote:
> xen_9pfs_free can't use gnttabdev since it is already closed and NULL-ed
> out when free is called.  Do the teardown in _disconnect().  This
> matches the setup done in _connect().
> 
> trace-events are also added for the XenDevOps functions.
> 
> Signed-off-by: Jason Andryuk <jandryuk@xxxxxxxxx>
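
[Context for the fix: the teardown in _disconnect() is safe to run twice
because every release is guarded by a NULL check and NULLs the pointer
afterwards, so a later _free() has nothing left to touch. A minimal
standalone sketch of that pattern -- the demo_* names are made up for
illustration, this is not QEMU code:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for Xen9pfsDev: one pointer represents the mapped
     * grants/event channel/bottom half that the real device holds. */
    struct demo_dev {
        void *ring;
    };

    static void demo_disconnect(struct demo_dev *d)
    {
        if (d->ring != NULL) {     /* guard: a second call is a no-op */
            free(d->ring);
            d->ring = NULL;        /* NULL out so nothing reuses it */
        }
    }

    static void demo_free(struct demo_dev *d)
    {
        demo_disconnect(d);        /* idempotent, safe even if already run */
        free(d);
    }

    int main(void)
    {
        struct demo_dev *d = calloc(1, sizeof(*d));
        d->ring = malloc(64);
        demo_disconnect(d);        /* teardown runs first...              */
        demo_free(d);              /* ...then free: no use-after-close    */
        printf("clean teardown\n");
        return 0;
    }

Moving the unmap calls into _disconnect() follows the same idea: by the
time _free() runs, gnttabdev is already closed, but there is also nothing
left for _free() to unmap.]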
> ---
>  hw/9pfs/trace-events     |  5 +++++
>  hw/9pfs/xen-9p-backend.c | 36 +++++++++++++++++++++++-------------
>  2 files changed, 28 insertions(+), 13 deletions(-)
> 
> diff --git a/hw/9pfs/trace-events b/hw/9pfs/trace-events
> index 6c77966c0b..7b5b0b5a48 100644
> --- a/hw/9pfs/trace-events
> +++ b/hw/9pfs/trace-events
> @@ -48,3 +48,8 @@ v9fs_readlink(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d"
>  v9fs_readlink_return(uint16_t tag, uint8_t id, char* target) "tag %d id %d name %s"
>  v9fs_setattr(uint16_t tag, uint8_t id, int32_t fid, int32_t valid, int32_t mode, int32_t uid, int32_t gid, int64_t size, int64_t atime_sec, int64_t mtime_sec) "tag %u id %u fid %d iattr={valid %d mode %d uid %d gid %d size %"PRId64" atime=%"PRId64" mtime=%"PRId64" }"
>  v9fs_setattr_return(uint16_t tag, uint8_t id) "tag %u id %u"
> +
> +xen_9pfs_alloc(char *name) "name %s"
> +xen_9pfs_connect(char *name) "name %s"
> +xen_9pfs_disconnect(char *name) "name %s"
> +xen_9pfs_free(char *name) "name %s"
> diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> index 0e266c552b..c646a0b3d1 100644
> --- a/hw/9pfs/xen-9p-backend.c
> +++ b/hw/9pfs/xen-9p-backend.c
> @@ -25,6 +25,8 @@
>  #include "qemu/iov.h"
>  #include "fsdev/qemu-fsdev.h"
>  
> +#include "trace.h"
> +
>  #define VERSIONS "1"
>  #define MAX_RINGS 8
>  #define MAX_RING_ORDER 9
> @@ -337,6 +339,8 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev)
>      Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
>      int i;
>  
> +    trace_xen_9pfs_disconnect(xendev->name);
> +
>      for (i = 0; i < xen_9pdev->num_rings; i++) {
>          if (xen_9pdev->rings[i].evtchndev != NULL) {
>              qemu_set_fd_handler(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev),
> @@ -345,40 +349,42 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev)
>                                     xen_9pdev->rings[i].local_port);
>              xen_9pdev->rings[i].evtchndev = NULL;
>          }
> -    }
> -}
> -
> -static int xen_9pfs_free(struct XenLegacyDevice *xendev)
> -{
> -    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
> -    int i;
> -
> -    if (xen_9pdev->rings[0].evtchndev != NULL) {
> -        xen_9pfs_disconnect(xendev);
> -    }
> -
> -    for (i = 0; i < xen_9pdev->num_rings; i++) {
>          if (xen_9pdev->rings[i].data != NULL) {
>              xen_be_unmap_grant_refs(&xen_9pdev->xendev,
>                                      xen_9pdev->rings[i].data,
>                                      xen_9pdev->rings[i].intf->ref,
>                                      (1 << xen_9pdev->rings[i].ring_order));
> +            xen_9pdev->rings[i].data = NULL;
>          }
>          if (xen_9pdev->rings[i].intf != NULL) {
>              xen_be_unmap_grant_ref(&xen_9pdev->xendev,
>                                     xen_9pdev->rings[i].intf,
>                                     xen_9pdev->rings[i].ref);
> +            xen_9pdev->rings[i].intf = NULL;
>          }
>          if (xen_9pdev->rings[i].bh != NULL) {
>              qemu_bh_delete(xen_9pdev->rings[i].bh);
> +            xen_9pdev->rings[i].bh = NULL;
>          }
>      }
>  
>      g_free(xen_9pdev->id);
> +    xen_9pdev->id = NULL;
>      g_free(xen_9pdev->tag);
> +    xen_9pdev->tag = NULL;
>      g_free(xen_9pdev->path);
> +    xen_9pdev->path = NULL;
>      g_free(xen_9pdev->security_model);
> +    xen_9pdev->security_model = NULL;
>      g_free(xen_9pdev->rings);
> +    xen_9pdev->rings = NULL;
> +    return;

NIT: this return is redundant at the end of a void function.

Aside from that:

Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>


> +}
> +
> +static int xen_9pfs_free(struct XenLegacyDevice *xendev)
> +{
> +    trace_xen_9pfs_free(xendev->name);
> +
>      return 0;
>  }
>  
> @@ -390,6 +396,8 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev)
>      V9fsState *s = &xen_9pdev->state;
>      QemuOpts *fsdev;
>  
> +    trace_xen_9pfs_connect(xendev->name);
> +
>      if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
>                               &xen_9pdev->num_rings) == -1 ||
>          xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
> @@ -499,6 +507,8 @@ out:
>  
>  static void xen_9pfs_alloc(struct XenLegacyDevice *xendev)
>  {
> +    trace_xen_9pfs_alloc(xendev->name);
> +
>      xenstore_write_be_str(xendev, "versions", VERSIONS);
>      xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
>      xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
> -- 
> 2.40.1
> 