From fd883ca705cd0df60569939d2ffd4dc701a97257 Mon Sep 17 00:00:00 2001
From: Oleksandr Andrushchenko
Date: Tue, 5 Dec 2017 17:49:57 +0200
Subject: [PATCH] drm/xen-zcopy: Implement dumb free wait IOCTL

1. Provide wait_handle while calling DRM_XEN_ZCOPY_DUMB_FROM_REFS
   IOCTL.
2. Implement an IOCTL which blocks until the dumb buffer with the
   provided wait handle is freed: this is needed for synchronization
   between the frontend and the backend when the frontend provides
   grant references of the buffer via the DRM_XEN_ZCOPY_DUMB_FROM_REFS
   IOCTL, as those references must be released before the backend
   replies with the XENDISPL_OP_DBUF_DESTROY response.

Signed-off-by: Oleksandr Andrushchenko
Reviewed-by: Volodymyr Babchuk
---
 drivers/gpu/drm/xen/xen_drm_zcopy_drv.c | 257 ++++++++++++++++++++++--
 include/uapi/drm/xen_zcopy_drm.h        |  21 ++
 2 files changed, 266 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/xen/xen_drm_zcopy_drv.c b/drivers/gpu/drm/xen/xen_drm_zcopy_drv.c
index a482e1090a3b96..03b3e545649a85 100644
--- a/drivers/gpu/drm/xen/xen_drm_zcopy_drv.c
+++ b/drivers/gpu/drm/xen/xen_drm_zcopy_drv.c
@@ -54,6 +54,42 @@ struct xen_gem_object {
 	struct sg_table *sgt;
 	/* map grant handles */
 	grant_handle_t *map_handles;
+	/*
+	 * this is used for synchronous object deletion, e.g.
+	 * when user-space wants to know that the grefs are unmapped
+	 */
+	struct kref refcount;
+	int wait_handle;
+};
+
+struct xen_wait_obj {
+	struct list_head list;
+	struct xen_gem_object *xen_obj;
+	struct completion completion;
+};
+
+struct xen_drv_info {
+	struct drm_device *drm_dev;
+
+	/*
+	 * For buffers created from the frontend's grant references,
+	 * synchronization between the backend and the frontend is needed
+	 * on buffer deletion, as the frontend expects us to unmap these
+	 * references only after the XENDISPL_OP_DBUF_DESTROY response.
+	 * The rationale behind implementing our own wait handle:
+	 * - a dumb buffer handle cannot be used: when the PRIME buffer
+	 *   gets exported there are at least two handles, one for the
+	 *   backend and another for the importing application, so when
+	 *   the backend closes its handle while the other application
+	 *   still holds the buffer, there is no way for the backend to
+	 *   tell which buffer to wait for when calling xen_ioctl_wait_free
+	 * - flink cannot be used either, as it is gone by the time DRM
+	 *   core calls .gem_free_object_unlocked
+	 */
+	struct list_head wait_obj_list;
+	struct idr idr;
+	spinlock_t idr_lock;
+	spinlock_t wait_list_lock;
 };
 
 static inline struct xen_gem_object *to_xen_gem_obj(
@@ -62,6 +98,113 @@ static inline struct xen_gem_object *to_xen_gem_obj(
 	return container_of(gem_obj, struct xen_gem_object, base);
 }
 
+static struct xen_wait_obj *xen_wait_obj_new(struct xen_drv_info *drv_info,
+		struct xen_gem_object *xen_obj)
+{
+	struct xen_wait_obj *wait_obj;
+
+	wait_obj = kzalloc(sizeof(*wait_obj), GFP_KERNEL);
+	if (!wait_obj)
+		return ERR_PTR(-ENOMEM);
+
+	init_completion(&wait_obj->completion);
+	wait_obj->xen_obj = xen_obj;
+	spin_lock(&drv_info->wait_list_lock);
+	list_add(&wait_obj->list, &drv_info->wait_obj_list);
+	spin_unlock(&drv_info->wait_list_lock);
+	return wait_obj;
+}
+
+static void xen_wait_obj_free(struct xen_drv_info *drv_info,
+		struct xen_wait_obj *wait_obj)
+{
+	struct xen_wait_obj *cur_wait_obj, *q;
+
+	spin_lock(&drv_info->wait_list_lock);
+	list_for_each_entry_safe(cur_wait_obj, q,
+			&drv_info->wait_obj_list, list) {
+		if (cur_wait_obj == wait_obj) {
+			list_del(&wait_obj->list);
+			kfree(wait_obj);
+			break;
+		}
+	}
+	spin_unlock(&drv_info->wait_list_lock);
+}
+
+static void xen_wait_obj_check_pending(struct xen_drv_info *drv_info)
+{
+	/*
+	 * This is intended to be called from .last_close, when no
+	 * pending wait objects should be on the list; make sure we
+	 * don't miss a bug if that is not the case.
+	 */
+	if (!list_empty(&drv_info->wait_obj_list)) {
+		DRM_ERROR("Removing with pending wait objects!\n");
+		BUG();
+	}
+}
+
+static int xen_wait_obj_wait(struct xen_wait_obj *wait_obj,
+		uint32_t wait_to_ms)
+{
+	if (!wait_for_completion_timeout(&wait_obj->completion,
+			msecs_to_jiffies(wait_to_ms)))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static void xen_wait_obj_signal(struct xen_drv_info *drv_info,
+		struct xen_gem_object *xen_obj)
+{
+	struct xen_wait_obj *wait_obj, *q;
+
+	spin_lock(&drv_info->wait_list_lock);
+	list_for_each_entry_safe(wait_obj, q, &drv_info->wait_obj_list, list) {
+		if (wait_obj->xen_obj == xen_obj) {
+			DRM_DEBUG("Found xen_obj in the wait list, wake\n");
+			complete_all(&wait_obj->completion);
+		}
+	}
+	spin_unlock(&drv_info->wait_list_lock);
+}
+
+static int xen_wait_obj_handle_new(struct xen_drv_info *drv_info,
+		struct xen_gem_object *xen_obj)
+{
+	int ret;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&drv_info->idr_lock);
+	ret = idr_alloc(&drv_info->idr, xen_obj, 1, 0, GFP_NOWAIT);
+	spin_unlock(&drv_info->idr_lock);
+	idr_preload_end();
+	return ret;
+}
+
+static void xen_wait_obj_handle_free(struct xen_drv_info *drv_info,
+		struct xen_gem_object *xen_obj)
+{
+	spin_lock(&drv_info->idr_lock);
+	idr_remove(&drv_info->idr, xen_obj->wait_handle);
+	spin_unlock(&drv_info->idr_lock);
+}
+
+static struct xen_gem_object *xen_get_obj_by_wait_handle(
+		struct xen_drv_info *drv_info, int wait_handle)
+{
+	struct xen_gem_object *xen_obj;
+
+	spin_lock(&drv_info->idr_lock);
+	/* check if xen_obj still exists */
+	xen_obj = idr_find(&drv_info->idr, wait_handle);
+	if (xen_obj)
+		kref_get(&xen_obj->refcount);
+	spin_unlock(&drv_info->idr_lock);
+	return xen_obj;
+}
+
 #ifdef CONFIG_DRM_XEN_ZCOPY_CMA
 static int xen_alloc_ballooned_pages(struct device *dev,
 		struct xen_gem_object *xen_obj)
@@ -413,10 +556,22 @@ static int xen_gem_init_obj(struct xen_gem_object *xen_obj,
 	return 0;
 }
 
+static void xen_obj_release(struct kref *kref)
+{
+	struct xen_gem_object *xen_obj =
+		container_of(kref, struct xen_gem_object, refcount);
+	struct xen_drv_info *drv_info = xen_obj->base.dev->dev_private;
+
+	xen_wait_obj_signal(drv_info, xen_obj);
+	kfree(xen_obj);
+}
+
 static void xen_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+	struct xen_drv_info *drv_info = gem_obj->dev->dev_private;
 
+	DRM_DEBUG("Freeing dumb with handle %d\n", xen_obj->dumb_handle);
 	if (xen_obj->grefs) {
 		if (xen_obj->sgt) {
 			if (xen_obj->base.import_attach)
@@ -428,7 +583,9 @@ static void xen_gem_free_object(struct drm_gem_object *gem_obj)
 		}
 	}
 	drm_gem_object_release(gem_obj);
-	kfree(xen_obj);
+
+	xen_wait_obj_handle_free(drv_info, xen_obj);
+	kref_put(&xen_obj->refcount, xen_obj_release);
 }
 
 #ifdef CONFIG_DRM_XEN_ZCOPY_WA_SWIOTLB
@@ -495,6 +652,7 @@ struct drm_gem_object *xen_gem_prime_import_sg_table(struct drm_device *dev,
 	ret = xen_gem_init_obj(xen_obj, dev, attach->dmabuf->size);
 	if (ret < 0)
 		goto fail;
+	kref_init(&xen_obj->refcount);
 	xen_obj->sgt = sgt;
 	xen_obj->num_pages = DIV_ROUND_UP(attach->dmabuf->size, PAGE_SIZE);
 	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
@@ -510,12 +668,14 @@ static int xen_do_ioctl_from_refs(struct drm_device *dev,
 		struct drm_xen_zcopy_dumb_from_refs *req,
 		struct drm_file *file_priv)
 {
+	struct xen_drv_info *drv_info = dev->dev_private;
 	struct xen_gem_object *xen_obj;
 	int ret;
 
 	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
 	if (!xen_obj)
 		return -ENOMEM;
+	kref_init(&xen_obj->refcount);
 	xen_obj->num_pages = req->num_grefs;
 	xen_obj->otherend_id = req->otherend_id;
 	xen_obj->grefs = kcalloc(xen_obj->num_pages, sizeof(grant_ref_t),
@@ -538,6 +698,19 @@ static int xen_do_ioctl_from_refs(struct drm_device *dev,
 		goto fail;
 	/* return handle */
 	req->dumb.handle = xen_obj->dumb_handle;
+
+	/*
+	 * Get a user-visible handle for this GEM object: the wait object
+	 * is not allocated at the moment, but if need be it will be
+	 * allocated at DRM_XEN_ZCOPY_DUMB_WAIT_FREE IOCTL time.
+	 */
+	ret = xen_wait_obj_handle_new(drv_info, xen_obj);
+	if (ret < 0)
+		goto fail;
+
+	req->wait_handle = ret;
+	xen_obj->wait_handle = ret;
 	return 0;
 
 fail:
@@ -643,6 +816,51 @@ static int xen_ioctl_to_refs(struct drm_device *dev,
 	return ret;
 }
 
+static int xen_ioctl_wait_free(struct drm_device *dev,
+		void *data, struct drm_file *file_priv)
+{
+	struct drm_xen_zcopy_dumb_wait_free *req =
+		(struct drm_xen_zcopy_dumb_wait_free *)data;
+	struct xen_drv_info *drv_info = dev->dev_private;
+	struct xen_gem_object *xen_obj;
+	struct xen_wait_obj *wait_obj;
+	int wait_handle, ret;
+
+	wait_handle = req->wait_handle;
+	/*
+	 * Try to find the wait handle: if it is not found, the handle
+	 * has either already been freed or it is wrong.
+	 */
+	xen_obj = xen_get_obj_by_wait_handle(drv_info, wait_handle);
+	if (!xen_obj)
+		return -ENOENT;
+
+	/*
+	 * xen_obj still exists and is kept alive by our reference now,
+	 * so prepare to wait: allocate a wait object and add it to the
+	 * wait list, so we can find it on release.
+	 */
+	wait_obj = xen_wait_obj_new(drv_info, xen_obj);
+	/* put our reference and wait for xen_obj release to fire */
+	kref_put(&xen_obj->refcount, xen_obj_release);
+	ret = PTR_ERR_OR_ZERO(wait_obj);
+	if (ret < 0) {
+		DRM_ERROR("Failed to setup wait object, ret %d\n", ret);
+		return ret;
+	}
+
+	ret = xen_wait_obj_wait(wait_obj, req->wait_to_ms);
+	xen_wait_obj_free(drv_info, wait_obj);
+	return ret;
+}
+
+static void xen_lastclose(struct drm_device *dev)
+{
+	struct xen_drv_info *drv_info = dev->dev_private;
+
+	xen_wait_obj_check_pending(drv_info);
+}
+
 static const struct drm_ioctl_desc xen_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(XEN_ZCOPY_DUMB_FROM_REFS,
 		xen_ioctl_from_refs,
@@ -650,6 +868,9 @@ static const struct drm_ioctl_desc xen_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(XEN_ZCOPY_DUMB_TO_REFS,
 		xen_ioctl_to_refs,
 		DRM_AUTH | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(XEN_ZCOPY_DUMB_WAIT_FREE,
+		xen_ioctl_wait_free,
+		DRM_AUTH | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 };
 
 static const struct file_operations xen_fops = {
@@ -661,6 +882,7 @@ static const struct file_operations xen_fops = {
 
 static struct drm_driver xen_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_PRIME,
+	.lastclose = xen_lastclose,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.gem_prime_export = drm_gem_prime_export,
 	.gem_prime_get_sg_table = xen_gem_prime_get_sg_table,
@@ -680,42 +902,53 @@ static struct drm_driver xen_driver = {
 
 static int xen_remove(struct platform_device *pdev)
 {
-	struct drm_device *drm_dev = platform_get_drvdata(pdev);
+	struct xen_drv_info *drv_info = platform_get_drvdata(pdev);
 
-	if (drm_dev) {
-		drm_dev_unregister(drm_dev);
-		drm_dev_unref(drm_dev);
+	if (drv_info && drv_info->drm_dev) {
+		drm_dev_unregister(drv_info->drm_dev);
+		drm_dev_unref(drv_info->drm_dev);
+		idr_destroy(&drv_info->idr);
 	}
+	kfree(drv_info);
 	return 0;
 }
 
 static int xen_probe(struct platform_device *pdev)
 {
-	struct drm_device *drm_dev;
+	struct xen_drv_info *drv_info;
 	int ret;
 
 	DRM_INFO("Creating %s\n", xen_driver.desc);
+	drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
+	if (!drv_info)
+		return -ENOMEM;
+
+	idr_init(&drv_info->idr);
+	spin_lock_init(&drv_info->idr_lock);
+	spin_lock_init(&drv_info->wait_list_lock);
+	INIT_LIST_HEAD(&drv_info->wait_obj_list);
 #ifdef CONFIG_DRM_XEN_ZCOPY_CMA
 	arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, false);
 #endif
-	drm_dev = drm_dev_alloc(&xen_driver, &pdev->dev);
-	if (!drm_dev)
-		return -ENOMEM;
+	drv_info->drm_dev = drm_dev_alloc(&xen_driver, &pdev->dev);
+	if (!drv_info->drm_dev) {
+		kfree(drv_info);
+		return -ENOMEM;
+	}
 
-	ret = drm_dev_register(drm_dev, 0);
+	ret = drm_dev_register(drv_info->drm_dev, 0);
 	if (ret < 0)
 		goto fail;
 
-	platform_set_drvdata(pdev, drm_dev);
+	drv_info->drm_dev->dev_private = drv_info;
+	platform_set_drvdata(pdev, drv_info);
 
 	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
 			xen_driver.name, xen_driver.major,
 			xen_driver.minor, xen_driver.patchlevel,
-			xen_driver.date, drm_dev->primary->index);
+			xen_driver.date, drv_info->drm_dev->primary->index);
 	return 0;
 
 fail:
-	drm_dev_unref(drm_dev);
+	drm_dev_unref(drv_info->drm_dev);
+	kfree(drv_info);
 	return ret;
 }
diff --git a/include/uapi/drm/xen_zcopy_drm.h b/include/uapi/drm/xen_zcopy_drm.h
index eb1d452e0bffa6..120b08abc6b658 100644
--- a/include/uapi/drm/xen_zcopy_drm.h
+++ b/include/uapi/drm/xen_zcopy_drm.h
@@ -53,6 +53,8 @@ extern "C" {
  *  o closes real HW driver's handle with DRM_IOCTL_GEM_CLOSE
  *  o closes zero-copy driver's handle with DRM_IOCTL_GEM_CLOSE
  *  o closes file descriptor of the exported buffer
+ *  o may wait for the object to be actually freed via wait_handle
+ *    and DRM_XEN_ZCOPY_DUMB_WAIT_FREE
  */
 #define DRM_XEN_ZCOPY_DUMB_FROM_REFS	0x00
 
@@ -64,6 +66,7 @@ struct drm_xen_zcopy_dumb_from_refs {
 	uint32_t *grefs;
 	uint64_t otherend_id;
 	struct drm_mode_create_dumb dumb;
+	uint32_t wait_handle;
 };
 
 /*
@@ -97,10 +100,28 @@ struct drm_xen_zcopy_dumb_to_refs {
 	uint32_t handle;
 };
 
+/*
+ * This will block until the dumb buffer with the provided wait handle
+ * is freed: this is needed for synchronization between the frontend
+ * and the backend in case the frontend provides grant references of
+ * the buffer via the DRM_XEN_ZCOPY_DUMB_FROM_REFS IOCTL, as those
+ * references must be released before the backend replies with the
+ * XENDISPL_OP_DBUF_DESTROY response.
+ * wait_handle must be the same value returned while calling the
+ * DRM_XEN_ZCOPY_DUMB_FROM_REFS IOCTL.
+ */
+#define DRM_XEN_ZCOPY_DUMB_WAIT_FREE	0x02
+
+struct drm_xen_zcopy_dumb_wait_free {
+	uint32_t wait_handle;
+	uint32_t wait_to_ms;
+};
+
 #define DRM_IOCTL_XEN_ZCOPY_DUMB_FROM_REFS DRM_IOWR(DRM_COMMAND_BASE + \
 	DRM_XEN_ZCOPY_DUMB_FROM_REFS, struct drm_xen_zcopy_dumb_from_refs)
 #define DRM_IOCTL_XEN_ZCOPY_DUMB_TO_REFS DRM_IOWR(DRM_COMMAND_BASE + \
 	DRM_XEN_ZCOPY_DUMB_TO_REFS, struct drm_xen_zcopy_dumb_to_refs)
+#define DRM_IOCTL_XEN_ZCOPY_DUMB_WAIT_FREE DRM_IOWR(DRM_COMMAND_BASE + \
+	DRM_XEN_ZCOPY_DUMB_WAIT_FREE, struct drm_xen_zcopy_dumb_wait_free)
 
 #if defined(__cplusplus)
 }
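
For illustration, here is a minimal user-space sketch of the intended
wait-free flow on the backend side. It is a sketch only, not part of the
patch: the helper name dbuf_destroy_sync(), the timeout policy and the
<drm/xen_zcopy_drm.h> include path are assumptions made for the example.
wait_handle is the value returned in the wait_handle field of
struct drm_xen_zcopy_dumb_from_refs by the DRM_XEN_ZCOPY_DUMB_FROM_REFS
IOCTL.

#include <errno.h>
#include <stdint.h>

#include <xf86drm.h>

#include <drm/xen_zcopy_drm.h>

/*
 * Hypothetical backend helper: drop our dumb handle, then block until
 * the frontend's grant references are really unmapped, so that it is
 * safe to reply with XENDISPL_OP_DBUF_DESTROY afterwards.
 */
static int dbuf_destroy_sync(int fd, uint32_t dumb_handle,
			     uint32_t wait_handle, uint32_t timeout_ms)
{
	struct drm_gem_close close_req = { .handle = dumb_handle };
	struct drm_xen_zcopy_dumb_wait_free wait_req = {
		.wait_handle = wait_handle,
		.wait_to_ms = timeout_ms,
	};

	/* close the backend's handle; an importer may still hold the buffer */
	if (drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close_req) < 0)
		return -errno;

	/* block until xen_gem_free_object() has run for this buffer */
	if (drmIoctl(fd, DRM_IOCTL_XEN_ZCOPY_DUMB_WAIT_FREE, &wait_req) < 0) {
		/* ENOENT: the buffer is already freed, nothing to wait for */
		if (errno != ENOENT)
			return -errno;
	}

	/* it is now safe to send the XENDISPL_OP_DBUF_DESTROY response */
	return 0;
}

An ETIMEDOUT failure means some importer still maps the buffer; the
backend may retry or report the stall instead of replying prematurely.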