Re: [Xen-devel] [PATCH] xen-netback: correct return value checks on xenbus_scanf()
> -----Original Message-----
> From: Xen-devel [mailto:xen-devel-bounces@xxxxxxxxxxxxx] On Behalf Of Jan
> Beulich
> Sent: 07 July 2016 08:57
> To: Wei Liu
> Cc: xen-devel@xxxxxxxxxxxxxxxxxxxx; netdev@xxxxxxxxxxxxxxx
> Subject: [Xen-devel] [PATCH] xen-netback: correct return value checks on
> xenbus_scanf()
>
> Only a positive return value indicates success.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

> ---
>  drivers/net/xen-netback/xenbus.c | 26 +++++++++++++-------------
>  1 file changed, 13 insertions(+), 13 deletions(-)
>
> --- 4.7-rc6-xenbus_scanf.orig/drivers/net/xen-netback/xenbus.c
> +++ 4.7-rc6-xenbus_scanf/drivers/net/xen-netback/xenbus.c
> @@ -741,7 +741,7 @@ static void xen_mcast_ctrl_changed(struc
>  	int val;
>
>  	if (xenbus_scanf(XBT_NIL, dev->otherend,
> -			 "request-multicast-control", "%d", &val) < 0)
> +			 "request-multicast-control", "%d", &val) <= 0)
>  		val = 0;
>  	vif->multicast_control = !!val;
>  }
> @@ -890,7 +890,7 @@ static void connect(struct backend_info
>  	err = xenbus_scanf(XBT_NIL, dev->otherend,
>  			   "multi-queue-num-queues",
>  			   "%u", &requested_num_queues);
> -	if (err < 0) {
> +	if (err <= 0) {
>  		requested_num_queues = 1; /* Fall back to single queue */
>  	} else if (requested_num_queues > xenvif_max_queues) {
>  		/* buggy or malicious guest */
> @@ -1056,7 +1056,7 @@ static int connect_data_rings(struct bac
>  	if (err < 0) {
>  		err = xenbus_scanf(XBT_NIL, xspath,
>  				   "event-channel", "%u", &tx_evtchn);
> -		if (err < 0) {
> +		if (err <= 0) {
>  			xenbus_dev_fatal(dev, err,
>  					 "reading %s/event-channel(-tx/rx)",
>  					 xspath);
> @@ -1092,10 +1092,10 @@ static int read_xenbus_vif_flags(struct
>  	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
>  			   &rx_copy);
>  	if (err == -ENOENT) {
> -		err = 0;
> +		err = 1;
>  		rx_copy = 0;
>  	}
> -	if (err < 0) {
> +	if (err <= 0) {
>  		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
>  				 dev->otherend);
>  		return err;
> @@ -1104,7 +1104,7 @@ static int read_xenbus_vif_flags(struct
>  		return -EOPNOTSUPP;
>
>  	if (xenbus_scanf(XBT_NIL, dev->otherend,
> -			 "feature-rx-notify", "%d", &val) < 0)
> +			 "feature-rx-notify", "%d", &val) <= 0)
>  		val = 0;
>  	if (!val) {
>  		/* - Reduce drain timeout to poll more frequently for
> @@ -1116,7 +1116,7 @@ static int read_xenbus_vif_flags(struct
>  	}
>
>  	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
> -			 "%d", &val) < 0)
> +			 "%d", &val) <= 0)
>  		val = 0;
>  	vif->can_sg = !!val;
>
> @@ -1124,25 +1124,25 @@ static int read_xenbus_vif_flags(struct
>  	vif->gso_prefix_mask = 0;
>
>  	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
> -			 "%d", &val) < 0)
> +			 "%d", &val) <= 0)
>  		val = 0;
>  	if (val)
>  		vif->gso_mask |= GSO_BIT(TCPV4);
>
>  	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
> -			 "%d", &val) < 0)
> +			 "%d", &val) <= 0)
>  		val = 0;
>  	if (val)
>  		vif->gso_prefix_mask |= GSO_BIT(TCPV4);
>
>  	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
> -			 "%d", &val) < 0)
> +			 "%d", &val) <= 0)
>  		val = 0;
>  	if (val)
>  		vif->gso_mask |= GSO_BIT(TCPV6);
>
>  	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
> -			 "%d", &val) < 0)
> +			 "%d", &val) <= 0)
>  		val = 0;
>  	if (val)
>  		vif->gso_prefix_mask |= GSO_BIT(TCPV6);
> @@ -1156,12 +1156,12 @@ static int read_xenbus_vif_flags(struct
>  	}
>
>  	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
> -			 "%d", &val) < 0)
> +			 "%d", &val) <= 0)
>  		val = 0;
>  	vif->ip_csum = !val;
>
>  	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
> -			 "%d", &val) < 0)
> +			 "%d", &val) <= 0)
>  		val = 0;
>  	vif->ipv6_csum = !!val;
>
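To spell out the reasoning for the archive: xenbus_scanf() follows sscanf() conventions, so for a single conversion it returns 1 when the node was read and parsed, 0 when the contents did not match the format, and a negative errno when the read itself failed. Only the strictly positive case fills in the output variable, which is why each check becomes "<= 0"; likewise the "err = 1" in the -ENOENT case just substitutes the value a successful single-field scan would have produced. Below is a minimal sketch of the resulting pattern; the helper read_feature_flag is hypothetical, for illustration only, and is not something this patch adds.

#include <linux/types.h>
#include <xen/xenbus.h>

/*
 * Hypothetical helper (illustration only, not part of the patch): read a
 * boolean frontend feature node.  xenbus_scanf() reports the number of
 * fields converted, so with a single "%d" conversion only a return value
 * of 1 means val was filled in; 0 (no conversion) and negative errno
 * values are both failures, hence "<= 0" rather than "< 0".
 */
static bool read_feature_flag(struct xenbus_device *dev, const char *node)
{
	int val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, node, "%d", &val) <= 0)
		val = 0;	/* missing or unparsable node: treat as off */

	return !!val;
}

With such a helper the feature-* reads above would collapse to calls like read_feature_flag(dev, "feature-sg"); the patch itself only tightens each comparison and leaves the surrounding structure untouched.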
offload", > - "%d", &val) < 0) > + "%d", &val) <= 0) > val = 0; > vif->ipv6_csum = !!val; > > > > > > _______________________________________________ > Xen-devel mailing list > Xen-devel@xxxxxxxxxxxxx > https://lists.xen.org/xen-devel _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel