drm: rename dev->count_lock to dev->buf_lock
author: Daniel Vetter <daniel.vetter@ffwll.ch>
Mon, 16 Dec 2013 10:21:06 +0000 (11:21 +0100)
committer: Daniel Vetter <daniel.vetter@ffwll.ch>
Wed, 23 Apr 2014 08:32:43 +0000 (10:32 +0200)
Rename it, since really that's all it protects — legacy horror stories
in drm_bufs.c. Since I don't want to waste any more time on this, I
didn't bother to actually look at what it protects in there, but it's
at least contained now.

v2: Move the spurious hunk to the right patch (Thierry).

Cc: Thierry Reding <thierry.reding@gmail.com>
Reviewed-by: Thierry Reding <treding@nvidia.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_stub.c
include/drm/drmP.h

index edec31fe3fed865aa2669e7c8e17aec912a82058..ef7f0199a0f1e59a0aab8682325db1ab44ceffd6 100644 (file)
@@ -656,13 +656,13 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
                DRM_DEBUG("zone invalid\n");
                return -EINVAL;
        }
-       spin_lock(&dev->count_lock);
+       spin_lock(&dev->buf_lock);
        if (dev->buf_use) {
-               spin_unlock(&dev->count_lock);
+               spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
-       spin_unlock(&dev->count_lock);
+       spin_unlock(&dev->buf_lock);
 
        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
@@ -805,13 +805,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;
 
-       spin_lock(&dev->count_lock);
+       spin_lock(&dev->buf_lock);
        if (dev->buf_use) {
-               spin_unlock(&dev->count_lock);
+               spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
-       spin_unlock(&dev->count_lock);
+       spin_unlock(&dev->buf_lock);
 
        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
@@ -1015,13 +1015,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
 
-       spin_lock(&dev->count_lock);
+       spin_lock(&dev->buf_lock);
        if (dev->buf_use) {
-               spin_unlock(&dev->count_lock);
+               spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
-       spin_unlock(&dev->count_lock);
+       spin_unlock(&dev->buf_lock);
 
        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
@@ -1175,7 +1175,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
  * \param arg pointer to a drm_buf_info structure.
  * \return zero on success or a negative number on failure.
  *
- * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * Increments drm_device::buf_use while holding the drm_device::buf_lock
  * lock, preventing of allocating more buffers after this call. Information
  * about each requested buffer is then copied into user space.
  */
@@ -1196,13 +1196,13 @@ int drm_infobufs(struct drm_device *dev, void *data,
        if (!dma)
                return -EINVAL;
 
-       spin_lock(&dev->count_lock);
+       spin_lock(&dev->buf_lock);
        if (atomic_read(&dev->buf_alloc)) {
-               spin_unlock(&dev->count_lock);
+               spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
-       spin_unlock(&dev->count_lock);
+       spin_unlock(&dev->buf_lock);
 
        for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                if (dma->bufs[i].buf_count)
@@ -1381,13 +1381,13 @@ int drm_mapbufs(struct drm_device *dev, void *data,
        if (!dma)
                return -EINVAL;
 
-       spin_lock(&dev->count_lock);
+       spin_lock(&dev->buf_lock);
        if (atomic_read(&dev->buf_alloc)) {
-               spin_unlock(&dev->count_lock);
+               spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
-       spin_unlock(&dev->count_lock);
+       spin_unlock(&dev->buf_lock);
 
        if (request->count >= dma->buf_count) {
                if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
index 4c24c3ac1efaf28225aa635834e982c9f5a6055b..5394b201c3d039ae5373f52517a61603d6b8ba2e 100644 (file)
@@ -569,7 +569,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
        INIT_LIST_HEAD(&dev->maplist);
        INIT_LIST_HEAD(&dev->vblank_event_list);
 
-       spin_lock_init(&dev->count_lock);
+       spin_lock_init(&dev->buf_lock);
        spin_lock_init(&dev->event_lock);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->ctxlist_mutex);
index a20d882ca26516b911940fb868588b85243139f3..85682d959c7e1872bf8ba73808e4b095d189f78f 100644 (file)
@@ -1069,7 +1069,6 @@ struct drm_device {
 
        /** \name Locks */
        /*@{ */
-       spinlock_t count_lock;          /**< For inuse, drm_device::open_count, drm_device::buf_use */
        struct mutex struct_mutex;      /**< For others */
        struct mutex master_mutex;      /**< For drm_minor::master and drm_file::is_master */
        /*@} */
@@ -1077,6 +1076,7 @@ struct drm_device {
        /** \name Usage Counters */
        /*@{ */
        int open_count;                 /**< Outstanding files open, protected by drm_global_mutex. */
+       spinlock_t buf_lock;            /**< For drm_device::buf_use and a few other things. */
        int buf_use;                    /**< Buffers in use -- cannot alloc */
        atomic_t buf_alloc;             /**< Buffer allocation in progress */
        /*@} */