diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e32b24b..83fa09a 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -50,7 +50,7 @@ static void blk_done(struct virtqueue *vq)
 	unsigned long flags;
 
 	spin_lock_irqsave(&vblk->lock, flags);
-	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
+	while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
 		int error;
 
 		switch (vbr->status) {
@@ -158,7 +158,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		}
 	}
 
-	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
+	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
 		mempool_free(vbr, vblk->pool);
 		return false;
 	}
@@ -187,7 +187,7 @@ static void do_virtblk_request(struct request_queue *q)
 	}
 
 	if (issued)
-		vblk->vq->vq_ops->kick(vblk->vq);
+		virtqueue_kick(vblk->vq);
 }
 
 static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 64fe0a7..75f1cbd 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -32,7 +32,7 @@ static bool busy;
 static void random_recv_done(struct virtqueue *vq)
 {
 	/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
-	if (!vq->vq_ops->get_buf(vq, &data_avail))
+	if (!virtqueue_get_buf(vq, &data_avail))
 		return;
 
 	complete(&have_data);
@@ -46,10 +46,10 @@ static void register_buffer(u8 *buf, size_t size)
 	sg_init_one(&sg, buf, size);
 
 	/* There should always be room for one buffer. */
-	if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0)
+	if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
 		BUG();
 
-	vq->vq_ops->kick(vq);
+	virtqueue_kick(vq);
 }
 
 static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 196428c..48ce834 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -328,7 +328,7 @@ static void *get_inbuf(struct port *port)
 	unsigned int len;
 
 	vq = port->in_vq;
-	buf = vq->vq_ops->get_buf(vq, &len);
+	buf = virtqueue_get_buf(vq, &len);
 	if (buf) {
 		buf->len = len;
 		buf->offset = 0;
@@ -349,8 +349,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
 
 	sg_init_one(sg, buf->buf, buf->size);
 
-	ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf);
-	vq->vq_ops->kick(vq);
+	ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
+	virtqueue_kick(vq);
 	return ret;
 }
 
@@ -366,7 +366,7 @@ static void discard_port_data(struct port *port)
 	if (port->inbuf)
 		buf = port->inbuf;
 	else
-		buf = vq->vq_ops->get_buf(vq, &len);
+		buf = virtqueue_get_buf(vq, &len);
 
 	ret = 0;
 	while (buf) {
@@ -374,7 +374,7 @@ static void discard_port_data(struct port *port)
 			ret++;
 			free_buf(buf);
 		}
-		buf = vq->vq_ops->get_buf(vq, &len);
+		buf = virtqueue_get_buf(vq, &len);
 	}
 	port->inbuf = NULL;
 	if (ret)
@@ -421,9 +421,9 @@ static ssize_t send_control_msg(struct port *port, unsigned int event,
 	vq = port->portdev->c_ovq;
 
 	sg_init_one(sg, &cpkt, sizeof(cpkt));
-	if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
-		vq->vq_ops->kick(vq);
-		while (!vq->vq_ops->get_buf(vq, &len))
+	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+		virtqueue_kick(vq);
+		while (!virtqueue_get_buf(vq, &len))
 			cpu_relax();
 	}
 	return 0;
@@ -439,10 +439,10 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count)
 	out_vq = port->out_vq;
 
 	sg_init_one(sg, in_buf, in_count);
-	ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf);
+	ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
 
 	/* Tell Host to go! */
-	out_vq->vq_ops->kick(out_vq);
+	virtqueue_kick(out_vq);
 
 	if (ret < 0) {
 		in_count = 0;
@@ -450,7 +450,7 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count)
 	}
 
 	/* Wait till the host acknowledges it pushed out the data we sent. */
-	while (!out_vq->vq_ops->get_buf(out_vq, &len))
+	while (!virtqueue_get_buf(out_vq, &len))
 		cpu_relax();
 fail:
 	/* We're expected to return the amount of data we wrote */
@@ -901,7 +901,7 @@ static int remove_port(struct port *port)
 	discard_port_data(port);
 
 	/* Remove buffers we queued up for the Host to send us data in. */
-	while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
+	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
 		free_buf(buf);
 
 	kfree(port->name);
@@ -1030,7 +1030,7 @@ static void control_work_handler(struct work_struct *work)
 	vq = portdev->c_ivq;
 
 	spin_lock(&portdev->cvq_lock);
-	while ((buf = vq->vq_ops->get_buf(vq, &len))) {
+	while ((buf = virtqueue_get_buf(vq, &len))) {
 		spin_unlock(&portdev->cvq_lock);
 
 		buf->len = len;
@@ -1224,7 +1224,7 @@ static int add_port(struct ports_device *portdev, u32 id)
 	return 0;
 
 free_inbufs:
-	while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
+	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
 		free_buf(buf);
 free_device:
 	device_destroy(pdrvdata.class, port->dev->devt);
@@ -1536,10 +1536,10 @@ static void virtcons_remove(struct virtio_device *vdev)
 
 	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
 
-	while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len)))
+	while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
 		free_buf(buf);
 
-	while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq)))
+	while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
 		free_buf(buf);
 
 	vdev->config->del_vqs(vdev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0577dd..91738d8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -119,7 +119,7 @@ static void skb_xmit_done(struct virtqueue *svq)
 	struct virtnet_info *vi = svq->vdev->priv;
 
 	/* Suppress further interrupts. */
-	svq->vq_ops->disable_cb(svq);
+	virtqueue_disable_cb(svq);
 
 	/* We were probably waiting for more output buffers. */
 	netif_wake_queue(vi->dev);
@@ -207,7 +207,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
 			return -EINVAL;
 		}
 
-		page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+		page = virtqueue_get_buf(vi->rvq, &len);
 		if (!page) {
 			pr_debug("%s: rx error: %d buffers missing\n",
 				 skb->dev->name, hdr->mhdr.num_buffers);
@@ -339,7 +339,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
 
 	skb_to_sgvec(skb, sg + 1, 0, skb->len);
 
-	err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
+	err = virtqueue_add_buf(vi->rvq, sg, 0, 2, skb);
 	if (err < 0)
 		dev_kfree_skb(skb);
 
@@ -386,7 +386,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
 
 	/* chain first in list head */
 	first->private = (unsigned long)list;
-	err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
+	err = virtqueue_add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
 				       first);
 	if (err < 0)
 		give_pages(vi, first);
@@ -406,7 +406,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
 
 	sg_init_one(&sg, page_address(page), PAGE_SIZE);
 
-	err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
+	err = virtqueue_add_buf(vi->rvq, &sg, 0, 1, page);
 	if (err < 0)
 		give_pages(vi, page);
 
@@ -435,7 +435,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 	} while (err > 0);
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
-	vi->rvq->vq_ops->kick(vi->rvq);
+	virtqueue_kick(vi->rvq);
 	return !oom;
 }
 
@@ -444,7 +444,7 @@ static void skb_recv_done(struct virtqueue *rvq)
 	struct virtnet_info *vi = rvq->vdev->priv;
 	/* Schedule NAPI, Suppress further interrupts if successful. */
 	if (napi_schedule_prep(&vi->napi)) {
-		rvq->vq_ops->disable_cb(rvq);
+		virtqueue_disable_cb(rvq);
 		__napi_schedule(&vi->napi);
 	}
 }
@@ -473,7 +473,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 
 again:
 	while (received < budget &&
-	       (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
 		receive_buf(vi->dev, buf, len);
 		--vi->num;
 		received++;
@@ -487,9 +487,9 @@ again:
 	/* Out of packets? */
 	if (received < budget) {
 		napi_complete(napi);
-		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
+		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
 		    napi_schedule_prep(napi)) {
-			vi->rvq->vq_ops->disable_cb(vi->rvq);
+			virtqueue_disable_cb(vi->rvq);
 			__napi_schedule(napi);
 			goto again;
 		}
@@ -503,7 +503,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 	struct sk_buff *skb;
 	unsigned int len, tot_sgs = 0;
 
-	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
+	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
@@ -559,7 +559,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 		sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
 
 	hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-	return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
+	return virtqueue_add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
 }
 
 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -587,7 +587,7 @@ again:
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
-	vi->svq->vq_ops->kick(vi->svq);
+	virtqueue_kick(vi->svq);
 
 	/* Don't wait up for transmitted skbs to be freed. */
 	skb_orphan(skb);
@@ -595,12 +595,12 @@ again:
 	 * before it gets out of hand.  Naturally, this wastes entries. */
 	if (capacity < 2+MAX_SKB_FRAGS) {
 		netif_stop_queue(dev);
-		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+		if (unlikely(!virtqueue_enable_cb(vi->svq))) {
 			/* More just got used, free them then recheck. */
 			capacity += free_old_xmit_skbs(vi);
 			if (capacity >= 2+MAX_SKB_FRAGS) {
 				netif_start_queue(dev);
-				vi->svq->vq_ops->disable_cb(vi->svq);
+				virtqueue_disable_cb(vi->svq);
 			}
 		}
 	}
@@ -645,7 +645,7 @@ static int virtnet_open(struct net_device *dev)
 	 * now.  virtnet_poll wants re-enable the queue, so we disable here.
 	 * We synchronize against interrupts via NAPI_STATE_SCHED */
 	if (napi_schedule_prep(&vi->napi)) {
-		vi->rvq->vq_ops->disable_cb(vi->rvq);
+		virtqueue_disable_cb(vi->rvq);
 		__napi_schedule(&vi->napi);
 	}
 	return 0;
@@ -682,15 +682,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
 	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
 
-	BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);
+	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
 
-	vi->cvq->vq_ops->kick(vi->cvq);
+	virtqueue_kick(vi->cvq);
 
 	/*
 	 * Spin for a response, the kick causes an ioport write, trapping
 	 * into the hypervisor, so the request should be handled immediately.
 	 */
-	while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
+	while (!virtqueue_get_buf(vi->cvq, &tmp))
 		cpu_relax();
 
 	return status == VIRTIO_NET_OK;
@@ -1006,13 +1006,13 @@ static void free_unused_bufs(struct virtnet_info *vi)
 {
 	void *buf;
 	while (1) {
-		buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+		buf = virtqueue_detach_unused_buf(vi->svq);
 		if (!buf)
 			break;
 		dev_kfree_skb(buf);
 	}
 	while (1) {
-		buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+		buf = virtqueue_detach_unused_buf(vi->rvq);
 		if (!buf)
 			break;
 		if (vi->mergeable_rx_bufs || vi->big_packets)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bfec7c2..0f1da45 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -75,7 +75,7 @@ static void balloon_ack(struct virtqueue *vq)
 	struct virtio_balloon *vb;
 	unsigned int len;
 
-	vb = vq->vq_ops->get_buf(vq, &len);
+	vb = virtqueue_get_buf(vq, &len);
 	if (vb)
 		complete(&vb->acked);
 }
@@ -89,9 +89,9 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
 	init_completion(&vb->acked);
 
 	/* We should always be able to add one buffer to an empty queue. */
-	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+	if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
 		BUG();
-	vq->vq_ops->kick(vq);
+	virtqueue_kick(vq);
 
 	/* When host has read buffer, this completes via balloon_ack */
 	wait_for_completion(&vb->acked);
@@ -204,7 +204,7 @@ static void stats_request(struct virtqueue *vq)
 	struct virtio_balloon *vb;
 	unsigned int len;
 
-	vb = vq->vq_ops->get_buf(vq, &len);
+	vb = virtqueue_get_buf(vq, &len);
 	if (!vb)
 		return;
 	vb->need_stats_update = 1;
@@ -221,9 +221,9 @@ static void stats_handle_request(struct virtio_balloon *vb)
 
 	vq = vb->stats_vq;
 	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
-	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+	if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
 		BUG();
-	vq->vq_ops->kick(vq);
+	virtqueue_kick(vq);
 }
 
 static void virtballoon_changed(struct virtio_device *vdev)
@@ -314,10 +314,9 @@ static int virtballoon_probe(struct virtio_device *vdev)
 		 * use it to signal us later.
 		 */
 		sg_init_one(&sg, vb->stats, sizeof vb->stats);
-		if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
-						  &sg, 1, 0, vb) < 0)
+		if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0)
 			BUG();
-		vb->stats_vq->vq_ops->kick(vb->stats_vq);
+		virtqueue_kick(vb->stats_vq);
 	}
 
 	vb->thread = kthread_run(balloon, vb, "vballoon");
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 0f90634..0717b5b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -155,11 +155,11 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
 	return head;
 }
 
-static int vring_add_buf(struct virtqueue *_vq,
-			 struct scatterlist sg[],
-			 unsigned int out,
-			 unsigned int in,
-			 void *data)
+int virtqueue_add_buf(struct virtqueue *_vq,
+		  struct scatterlist sg[],
+		  unsigned int out,
+		  unsigned int in,
+		  void *data)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	unsigned int i, avail, head, uninitialized_var(prev);
@@ -232,8 +232,9 @@ add_head:
 		return vq->num_free ? vq->vring.num : 0;
 	return vq->num_free;
 }
+EXPORT_SYMBOL_GPL(virtqueue_add_buf);
 
-static void vring_kick(struct virtqueue *_vq)
+void virtqueue_kick(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	START_USE(vq);
@@ -253,6 +254,7 @@ static void vring_kick(struct virtqueue *_vq)
 
 	END_USE(vq);
 }
+EXPORT_SYMBOL_GPL(virtqueue_kick);
 
 static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
 {
@@ -284,7 +286,7 @@ static inline bool more_used(const struct vring_virtqueue *vq)
 	return vq->last_used_idx != vq->vring.used->idx;
 }
 
-static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
+void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	void *ret;
@@ -325,15 +327,17 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
 	END_USE(vq);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(virtqueue_get_buf);
 
-static void vring_disable_cb(struct virtqueue *_vq)
+void virtqueue_disable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
 	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 }
+EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
-static bool vring_enable_cb(struct virtqueue *_vq)
+bool virtqueue_enable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -351,8 +355,9 @@ static bool vring_enable_cb(struct virtqueue *_vq)
 	END_USE(vq);
 	return true;
 }
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 
-static void *vring_detach_unused_buf(struct virtqueue *_vq)
+void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	unsigned int i;
@@ -375,6 +380,7 @@ static void *vring_detach_unused_buf(struct virtqueue *_vq)
 	END_USE(vq);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
 
 irqreturn_t vring_interrupt(int irq, void *_vq)
 {
@@ -396,15 +402,6 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
 }
 EXPORT_SYMBOL_GPL(vring_interrupt);
 
-static struct virtqueue_ops vring_vq_ops = {
-	.add_buf = vring_add_buf,
-	.get_buf = vring_get_buf,
-	.kick = vring_kick,
-	.disable_cb = vring_disable_cb,
-	.enable_cb = vring_enable_cb,
-	.detach_unused_buf = vring_detach_unused_buf,
-};
-
 struct virtqueue *vring_new_virtqueue(unsigned int num,
 				      unsigned int vring_align,
 				      struct virtio_device *vdev,
@@ -429,7 +426,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
 	vring_init(&vq->vring, num, pages, vring_align);
 	vq->vq.callback = callback;
 	vq->vq.vdev = vdev;
-	vq->vq.vq_ops = &vring_vq_ops;
 	vq->vq.name = name;
 	vq->notify = notify;
 	vq->broken = false;
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 40d1709..5b0fce0 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -14,7 +14,6 @@
  * @callback: the function to call when buffers are consumed (can be NULL).
  * @name: the name of this virtqueue (mainly for debugging)
  * @vdev: the virtio device this queue was created for.
- * @vq_ops: the operations for this virtqueue (see below).
  * @priv: a pointer for the virtqueue implementation to use.
  */
 struct virtqueue {
@@ -22,60 +21,60 @@ struct virtqueue {
 	void (*callback)(struct virtqueue *vq);
 	const char *name;
 	struct virtio_device *vdev;
-	struct virtqueue_ops *vq_ops;
 	void *priv;
 };
 
 /**
- * virtqueue_ops - operations for virtqueue abstraction layer
- * @add_buf: expose buffer to other end
+ * operations for virtqueue
+ * virtqueue_add_buf: expose buffer to other end
  *	vq: the struct virtqueue we're talking about.
  *	sg: the description of the buffer(s).
  *	out_num: the number of sg readable by other side
  *	in_num: the number of sg which are writable (after readable ones)
  *	data: the token identifying the buffer.
  *      Returns remaining capacity of queue (sg segments) or a negative error.
- * @kick: update after add_buf
+ * virtqueue_kick: update after add_buf
  *	vq: the struct virtqueue
  *	After one or more add_buf calls, invoke this to kick the other side.
- * @get_buf: get the next used buffer
+ * virtqueue_get_buf: get the next used buffer
  *	vq: the struct virtqueue we're talking about.
  *	len: the length written into the buffer
  *	Returns NULL or the "data" token handed to add_buf.
- * @disable_cb: disable callbacks
+ * virtqueue_disable_cb: disable callbacks
  *	vq: the struct virtqueue we're talking about.
  *	Note that this is not necessarily synchronous, hence unreliable and only
  *	useful as an optimization.
- * @enable_cb: restart callbacks after disable_cb.
+ * virtqueue_enable_cb: restart callbacks after disable_cb.
  *	vq: the struct virtqueue we're talking about.
  *	This re-enables callbacks; it returns "false" if there are pending
  *	buffers in the queue, to detect a possible race between the driver
  *	checking for more work, and enabling callbacks.
- * @detach_unused_buf: detach first unused buffer
+ * virtqueue_detach_unused_buf: detach first unused buffer
  * 	vq: the struct virtqueue we're talking about.
  * 	Returns NULL or the "data" token handed to add_buf
  *
  * Locking rules are straightforward: the driver is responsible for
  * locking.  No two operations may be invoked simultaneously, with the exception
- * of @disable_cb.
+ * of virtqueue_disable_cb.
  *
  * All operations can be called in any context.
  */
-struct virtqueue_ops {
-	int (*add_buf)(struct virtqueue *vq,
-		       struct scatterlist sg[],
-		       unsigned int out_num,
-		       unsigned int in_num,
-		       void *data);
 
-	void (*kick)(struct virtqueue *vq);
+int virtqueue_add_buf(struct virtqueue *vq,
+		      struct scatterlist sg[],
+		      unsigned int out_num,
+		      unsigned int in_num,
+		      void *data);
 
-	void *(*get_buf)(struct virtqueue *vq, unsigned int *len);
+void virtqueue_kick(struct virtqueue *vq);
 
-	void (*disable_cb)(struct virtqueue *vq);
-	bool (*enable_cb)(struct virtqueue *vq);
-	void *(*detach_unused_buf)(struct virtqueue *vq);
-};
+void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
+
+void virtqueue_disable_cb(struct virtqueue *vq);
+
+bool virtqueue_enable_cb(struct virtqueue *vq);
+
+void *virtqueue_detach_unused_buf(struct virtqueue *vq);
 
 /**
  * virtio_device - representation of a device using virtio
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 7eb78ec..dcfbe99 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -137,7 +137,7 @@ static void req_done(struct virtqueue *vq)
 
 	P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
 
-	while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) {
+	while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) {
 		P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
 		P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
 		req = p9_tag_lookup(chan->client, rc->tag);
@@ -209,13 +209,13 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 
 	req->status = REQ_STATUS_SENT;
 
-	if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
+	if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
 		P9_DPRINTK(P9_DEBUG_TRANS,
 			"9p debug: virtio rpc add_buf returned failure");
 		return -EIO;
 	}
 
-	chan->vq->vq_ops->kick(chan->vq);
+	virtqueue_kick(chan->vq);
 
 	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
 	return 0;
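
The conversion above is mechanical: every indirect call of the form
vq->vq_ops->op(vq, ...) becomes a direct virtqueue_op(vq, ...) call with the
same arguments, return values and locking rules, and the vq_ops table itself
is removed from struct virtqueue. As a minimal sketch (not part of the patch),
this is roughly how a driver's producer and consumer paths look against the
renamed API; the function and buffer names below are hypothetical, and a real
driver obtains its virtqueue from vdev->config->find_vqs():

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Producer side: expose one readable buffer to the host and notify it. */
static int example_send(struct virtqueue *vq, void *buf, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);

	/* One out (host-readable) segment, zero in (host-writable) segments. */
	if (virtqueue_add_buf(vq, &sg, 1, 0, buf) < 0)
		return -ENOSPC;

	virtqueue_kick(vq);
	return 0;
}

/* Consumer side: drain used buffers, typically from the virtqueue callback. */
static void example_done(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	/* Returns the "data" token handed to virtqueue_add_buf(), or NULL. */
	while ((buf = virtqueue_get_buf(vq, &len)) != NULL) {
		/* ... consume 'len' bytes written into 'buf' ... */
	}
}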