[PATCH 4.13 09/11] vmbus: refactor hv_signal_on_read

From: Greg Kroah-Hartman
Date: Thu Oct 19 2017 - 09:42:20 EST


4.13-stable review patch. If anyone has any objections, please let me know.

------------------

From: Stephen Hemminger <stephen@xxxxxxxxxxxxxxxxxx>

commit 8dd45f2ab005a1f3301296059b23b03ec3dbf79b upstream.

The function hv_signal_on_read was defined in hyperv.h and
used in only one place in the ring_buffer code. It is clearer
to just move it inline there.

Signed-off-by: Stephen Hemminger <sthemmin@xxxxxxxxxxxxx>
Signed-off-by: K. Y. Srinivasan <kys@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
drivers/hv/ring_buffer.c | 32 ++++++++++++++++++++++++++++--
include/linux/hyperv.h   | 49 -----------------------------------------------
2 files changed, 30 insertions(+), 51 deletions(-)

--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -29,6 +29,7 @@
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

@@ -357,7 +358,7 @@ struct vmpacket_descriptor *hv_pkt_iter_
{
struct hv_ring_buffer_info *rbi = &channel->inbound;

- /* set state for later hv_signal_on_read() */
+ /* set state for later hv_pkt_iter_close */
rbi->cached_read_index = rbi->ring_buffer->read_index;

if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
@@ -400,6 +401,8 @@ EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
+ u32 cur_write_sz, cached_write_sz;
+ u32 pending_sz;

/*
* Make sure all reads are done before we update the read index since
@@ -409,6 +412,31 @@ void hv_pkt_iter_close(struct vmbus_chan
virt_rmb();
rbi->ring_buffer->read_index = rbi->priv_read_index;

- hv_signal_on_read(channel);
+ /*
+ * Issue a full memory barrier before making the signaling decision.
+ * Here is the reason for having this barrier:
+ * If the reading of the pend_sz (in this function)
+ * were to be reordered and read before we commit the new read
+ * index (in the calling function) we could
+ * have a problem. If the host were to set the pending_sz after we
+ * have sampled pending_sz and go to sleep before we commit the
+ * read index, we could miss sending the interrupt. Issue a full
+ * memory barrier to address this.
+ */
+ virt_mb();
+
+ pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+ /* If the other end is not blocked on write don't bother. */
+ if (pending_sz == 0)
+ return;
+
+ cur_write_sz = hv_get_bytes_to_write(rbi);
+
+ if (cur_write_sz < pending_sz)
+ return;
+
+ cached_write_sz = hv_get_cached_bytes_to_write(rbi);
+ if (cached_write_sz < pending_sz)
+ vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1476,55 +1476,6 @@ hv_get_ring_buffer(const struct hv_ring_
}

/*
- * To optimize the flow management on the send-side,
- * when the sender is blocked because of lack of
- * sufficient space in the ring buffer, potential the
- * consumer of the ring buffer can signal the producer.
- * This is controlled by the following parameters:
- *
- * 1. pending_send_sz: This is the size in bytes that the
- * producer is trying to send.
- * 2. The feature bit feat_pending_send_sz set to indicate if
- * the consumer of the ring will signal when the ring
- * state transitions from being full to a state where
- * there is room for the producer to send the pending packet.
- */
-
-static inline void hv_signal_on_read(struct vmbus_channel *channel)
-{
- u32 cur_write_sz, cached_write_sz;
- u32 pending_sz;
- struct hv_ring_buffer_info *rbi = &channel->inbound;
-
- /*
- * Issue a full memory barrier before making the signaling decision.
- * Here is the reason for having this barrier:
- * If the reading of the pend_sz (in this function)
- * were to be reordered and read before we commit the new read
- * index (in the calling function) we could
- * have a problem. If the host were to set the pending_sz after we
- * have sampled pending_sz and go to sleep before we commit the
- * read index, we could miss sending the interrupt. Issue a full
- * memory barrier to address this.
- */
- virt_mb();
-
- pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
- /* If the other end is not blocked on write don't bother. */
- if (pending_sz == 0)
- return;
-
- cur_write_sz = hv_get_bytes_to_write(rbi);
-
- if (cur_write_sz < pending_sz)
- return;
-
- cached_write_sz = hv_get_cached_bytes_to_write(rbi);
- if (cached_write_sz < pending_sz)
- vmbus_setevent(channel);
-}
-
-/*
* Mask off host interrupt callback notifications
*/
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)