On 19-02-25, 17:04, Viresh Kumar wrote:
On 19-02-25, 11:25, Bertrand Marquis wrote:
It was my understanding that polling would be done at the virtqueue level and would not require anything specific in the transport itself.
Yeah, that's one way out but it likely needs updates to the virtio frontend drivers. And I don't want to get into that. I hope we can do better.
Finally after digging a bit into trusty's code and talking to Aryton, I realized that multi-thread support is there in trusty, but sleeping in the direct message handler would be difficult to implement.
And then I found an easy enough way to get the polling done from the virtio_msg_ffa.c file itself. The call to vring_interrupt() (made from virtio_msg_receive()) is lightweight enough, and basically returns early if there are no new buffers queued by the device. I am doing the polling at a 1 ms interval for now.
Aryton, I have already tested this with Xen based setup and it works. Please give it a try with trusty. Thanks.
(pushed to my virtio/msg branch)
diff --git a/drivers/virtio/virtio_msg_ffa.c b/drivers/virtio/virtio_msg_ffa.c index 20667571146b..7d7df47035f3 100644 --- a/drivers/virtio/virtio_msg_ffa.c +++ b/drivers/virtio/virtio_msg_ffa.c @@ -14,9 +14,11 @@
#include <linux/arm_ffa.h> #include <linux/completion.h> +#include <linux/delay.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/idr.h> +#include <linux/kthread.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of_reserved_mem.h> @@ -62,6 +64,7 @@ struct virtio_msg_ffa_device { struct list_head area_list; struct mutex lock; /* to protect area_list */ struct virtio_msg_async async; + struct task_struct *used_event_task; void *response;
bool indirect; @@ -160,6 +163,34 @@ find_vmdev(struct virtio_msg_ffa_device *vmfdev, u16 dev_id) return NULL; }
+static int used_event_task(void *data) +{ + struct virtio_msg_ffa_device *vmfdev = data; + struct virtio_msg_device *vmdev; + struct virtio_msg_vq *info; + struct virtio_msg msg; + int i; + + virtio_msg_prepare(&msg, false, VIRTIO_MSG_EVENT_USED, 0); + + while (!kthread_should_stop()) { + for (i = 0; i < vmfdev->vmdev_count; i++) { + vmdev = &vmfdev->vmdevs[i]; + msg.event_used.index = 0; + + list_for_each_entry(info, &vmdev->virtqueues, node) { + virtio_msg_receive(vmdev, &msg); + msg.event_used.index++; + } + } + + /* sleep for 1ms */ + fsleep(1000); + } + + return 0; +} + static void handle_async_event(struct virtio_msg_ffa_device *vmfdev, struct virtio_msg *msg) { @@ -602,6 +633,15 @@ static int virtio_msg_ffa_probe(struct ffa_device *ffa_dev) } }
+ /* Run the kthread if indirect messages aren't supported */ + if (!(features & VIRTIO_MSG_FFA_FEATURE_INDIRECT_MSG_SUPP)) { + vmfdev->used_event_task = kthread_run(used_event_task, vmfdev, "vmsg-ffa-ue"); + if (IS_ERR(vmfdev->used_event_task)) { + ret = PTR_ERR(vmfdev->used_event_task); + goto deactivate; + } + } + return 0;
unregister: @@ -618,6 +658,7 @@ static void virtio_msg_ffa_remove(struct ffa_device *ffa_dev) { struct virtio_msg_ffa_device *vmfdev = ffa_dev->dev.driver_data;
+ kthread_stop(vmfdev->used_event_task); remove_vmdevs(vmfdev, vmfdev->vmdev_count); vmsg_ffa_bus_deactivate(vmfdev); }