1 /**************************************************************************
3 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_resource_priv.h"
30 #include <ttm/ttm_placement.h>
33 * struct vmw_user_surface - User-space visible surface resource
35 * @base: The TTM base object handling user-space visibility.
36 * @srf: The surface metadata.
37 * @size: TTM accounting size for the surface.
39 struct vmw_user_surface {
40 struct ttm_base_object base;
41 struct vmw_surface srf;
43 uint32_t backup_handle;
/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:      Surface face.
 * @mip:       Mip level within @face.
 * @bo_offset: Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};
/*
 * Forward declarations of the legacy-surface resource callbacks and the
 * user-surface helpers defined later in this file.
 *
 * Note: vmw_legacy_srf_unbind() takes a @readback flag — its definition
 * tests `readback` to decide whether to DMA surface contents back to the
 * backup buffer, so the prototype must carry the parameter.
 */
static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
71 static const struct vmw_user_resource_conv user_surface_conv = {
72 .object_type = VMW_RES_SURFACE,
73 .base_obj_to_res = vmw_user_surface_base_to_res,
74 .res_free = vmw_user_surface_free
77 const struct vmw_user_resource_conv *user_surface_converter =
81 static uint64_t vmw_user_surface_size;
83 static const struct vmw_res_func vmw_legacy_surface_func = {
84 .res_type = vmw_res_surface,
85 .needs_backup = false,
87 .type_name = "legacy surfaces",
88 .backup_placement = &vmw_srf_placement,
89 .create = &vmw_legacy_srf_create,
90 .destroy = &vmw_legacy_srf_destroy,
91 .bind = &vmw_legacy_srf_bind,
92 .unbind = &vmw_legacy_srf_unbind
96 * struct vmw_bpp - Bits per pixel info for surface storage size computation.
98 * @bpp: Bits per pixel.
99 * @s_bpp: Stride bits per pixel. See definition below.
108 * Size table for the supported SVGA3D surface formats. It consists of
109 * two values. The bpp value and the s_bpp value which is short for
110 * "stride bits per pixel" The values are given in such a way that the
111 * minimum stride for the image is calculated using
113 * min_stride = w*s_bpp
115 * and the total memory requirement for the image is
117 * h*min_stride*bpp/s_bpp
120 static const struct vmw_bpp vmw_sf_bpp[] = {
121 [SVGA3D_FORMAT_INVALID] = {0, 0},
122 [SVGA3D_X8R8G8B8] = {32, 32},
123 [SVGA3D_A8R8G8B8] = {32, 32},
124 [SVGA3D_R5G6B5] = {16, 16},
125 [SVGA3D_X1R5G5B5] = {16, 16},
126 [SVGA3D_A1R5G5B5] = {16, 16},
127 [SVGA3D_A4R4G4B4] = {16, 16},
128 [SVGA3D_Z_D32] = {32, 32},
129 [SVGA3D_Z_D16] = {16, 16},
130 [SVGA3D_Z_D24S8] = {32, 32},
131 [SVGA3D_Z_D15S1] = {16, 16},
132 [SVGA3D_LUMINANCE8] = {8, 8},
133 [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
134 [SVGA3D_LUMINANCE16] = {16, 16},
135 [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
136 [SVGA3D_DXT1] = {4, 16},
137 [SVGA3D_DXT2] = {8, 32},
138 [SVGA3D_DXT3] = {8, 32},
139 [SVGA3D_DXT4] = {8, 32},
140 [SVGA3D_DXT5] = {8, 32},
141 [SVGA3D_BUMPU8V8] = {16, 16},
142 [SVGA3D_BUMPL6V5U5] = {16, 16},
143 [SVGA3D_BUMPX8L8V8U8] = {32, 32},
144 [SVGA3D_ARGB_S10E5] = {16, 16},
145 [SVGA3D_ARGB_S23E8] = {32, 32},
146 [SVGA3D_A2R10G10B10] = {32, 32},
147 [SVGA3D_V8U8] = {16, 16},
148 [SVGA3D_Q8W8V8U8] = {32, 32},
149 [SVGA3D_CxV8U8] = {16, 16},
150 [SVGA3D_X8L8V8U8] = {32, 32},
151 [SVGA3D_A2W10V10U10] = {32, 32},
152 [SVGA3D_ALPHA8] = {8, 8},
153 [SVGA3D_R_S10E5] = {16, 16},
154 [SVGA3D_R_S23E8] = {32, 32},
155 [SVGA3D_RG_S10E5] = {16, 16},
156 [SVGA3D_RG_S23E8] = {32, 32},
157 [SVGA3D_BUFFER] = {8, 8},
158 [SVGA3D_Z_D24X8] = {32, 32},
159 [SVGA3D_V16U16] = {32, 32},
160 [SVGA3D_G16R16] = {32, 32},
161 [SVGA3D_A16B16G16R16] = {64, 64},
162 [SVGA3D_UYVY] = {12, 12},
163 [SVGA3D_YUY2] = {12, 12},
164 [SVGA3D_NV12] = {12, 8},
165 [SVGA3D_AYUV] = {32, 32},
166 [SVGA3D_BC4_UNORM] = {4, 16},
167 [SVGA3D_BC5_UNORM] = {8, 32},
168 [SVGA3D_Z_DF16] = {16, 16},
169 [SVGA3D_Z_DF24] = {24, 24},
170 [SVGA3D_Z_D24S8_INT] = {32, 32}
175 * struct vmw_surface_dma - SVGA3D DMA command
177 struct vmw_surface_dma {
178 SVGA3dCmdHeader header;
179 SVGA3dCmdSurfaceDMA body;
181 SVGA3dCmdSurfaceDMASuffix suffix;
185 * struct vmw_surface_define - SVGA3D Surface Define command
187 struct vmw_surface_define {
188 SVGA3dCmdHeader header;
189 SVGA3dCmdDefineSurface body;
193 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
195 struct vmw_surface_destroy {
196 SVGA3dCmdHeader header;
197 SVGA3dCmdDestroySurface body;
202 * vmw_surface_dma_size - Compute fifo size for a dma command.
204 * @srf: Pointer to a struct vmw_surface
206 * Computes the required size for a surface dma command for backup or
207 * restoration of the surface represented by @srf.
209 static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
211 return srf->num_sizes * sizeof(struct vmw_surface_dma);
216 * vmw_surface_define_size - Compute fifo size for a surface define command.
218 * @srf: Pointer to a struct vmw_surface
220 * Computes the required size for a surface define command for the definition
221 * of the surface represented by @srf.
223 static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
225 return sizeof(struct vmw_surface_define) + srf->num_sizes *
231 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
233 * Computes the required size for a surface destroy command for the destruction
236 static inline uint32_t vmw_surface_destroy_size(void)
238 return sizeof(struct vmw_surface_destroy);
242 * vmw_surface_destroy_encode - Encode a surface_destroy command.
244 * @id: The surface id
245 * @cmd_space: Pointer to memory area in which the commands should be encoded.
247 static void vmw_surface_destroy_encode(uint32_t id,
250 struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
253 cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
254 cmd->header.size = sizeof(cmd->body);
259 * vmw_surface_define_encode - Encode a surface_define command.
261 * @srf: Pointer to a struct vmw_surface object.
262 * @cmd_space: Pointer to memory area in which the commands should be encoded.
264 static void vmw_surface_define_encode(const struct vmw_surface *srf,
267 struct vmw_surface_define *cmd = (struct vmw_surface_define *)
269 struct drm_vmw_size *src_size;
270 SVGA3dSize *cmd_size;
274 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
276 cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
277 cmd->header.size = cmd_len;
278 cmd->body.sid = srf->res.id;
279 cmd->body.surfaceFlags = srf->flags;
280 cmd->body.format = cpu_to_le32(srf->format);
281 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
282 cmd->body.face[i].numMipLevels = srf->mip_levels[i];
285 cmd_size = (SVGA3dSize *) cmd;
286 src_size = srf->sizes;
288 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
289 cmd_size->width = src_size->width;
290 cmd_size->height = src_size->height;
291 cmd_size->depth = src_size->depth;
296 * vmw_surface_dma_encode - Encode a surface_dma command.
298 * @srf: Pointer to a struct vmw_surface object.
299 * @cmd_space: Pointer to memory area in which the commands should be encoded.
300 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
301 * should be placed or read from.
302 * @to_surface: Boolean whether to DMA to the surface or from the surface.
304 static void vmw_surface_dma_encode(struct vmw_surface *srf,
306 const SVGAGuestPtr *ptr,
310 uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
311 uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
312 struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
314 for (i = 0; i < srf->num_sizes; ++i) {
315 SVGA3dCmdHeader *header = &cmd->header;
316 SVGA3dCmdSurfaceDMA *body = &cmd->body;
317 SVGA3dCopyBox *cb = &cmd->cb;
318 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
319 const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
320 const struct drm_vmw_size *cur_size = &srf->sizes[i];
322 header->id = SVGA_3D_CMD_SURFACE_DMA;
323 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
325 body->guest.ptr = *ptr;
326 body->guest.ptr.offset += cur_offset->bo_offset;
327 body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
328 body->host.sid = srf->res.id;
329 body->host.face = cur_offset->face;
330 body->host.mipmap = cur_offset->mip;
331 body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
332 SVGA3D_READ_HOST_VRAM);
339 cb->w = cur_size->width;
340 cb->h = cur_size->height;
341 cb->d = cur_size->depth;
343 suffix->suffixSize = sizeof(*suffix);
344 suffix->maximumOffset = body->guest.pitch*cur_size->height*
345 cur_size->depth*bpp / stride_bpp;
346 suffix->flags.discard = 0;
347 suffix->flags.unsynchronized = 0;
348 suffix->flags.reserved = 0;
355 * vmw_hw_surface_destroy - destroy a Device surface
357 * @res: Pointer to a struct vmw_resource embedded in a struct
360 * Destroys a the device surface associated with a struct vmw_surface if
361 * any, and adjusts accounting and resource count accordingly.
363 static void vmw_hw_surface_destroy(struct vmw_resource *res)
366 struct vmw_private *dev_priv = res->dev_priv;
367 struct vmw_surface *srf;
372 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
373 if (unlikely(cmd == NULL)) {
374 DRM_ERROR("Failed reserving FIFO space for surface "
379 vmw_surface_destroy_encode(res->id, cmd);
380 vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
383 * used_memory_size_atomic, or separate lock
384 * to avoid taking dev_priv::cmdbuf_mutex in
388 mutex_lock(&dev_priv->cmdbuf_mutex);
389 srf = vmw_res_to_srf(res);
390 dev_priv->used_memory_size -= res->backup_size;
391 mutex_unlock(&dev_priv->cmdbuf_mutex);
393 vmw_3d_resource_dec(dev_priv, false);
397 * vmw_legacy_srf_create - Create a device surface as part of the
398 * resource validation process.
400 * @res: Pointer to a struct vmw_surface.
402 * If the surface doesn't have a hw id.
404 * Returns -EBUSY if there wasn't sufficient device resources to
405 * complete the validation. Retry after freeing up resources.
407 * May return other errors if the kernel is out of guest resources.
409 static int vmw_legacy_srf_create(struct vmw_resource *res)
411 struct vmw_private *dev_priv = res->dev_priv;
412 struct vmw_surface *srf;
413 uint32_t submit_size;
417 if (likely(res->id != -1))
420 srf = vmw_res_to_srf(res);
421 if (unlikely(dev_priv->used_memory_size + res->backup_size >=
422 dev_priv->memory_size))
426 * Alloc id for the resource.
429 ret = vmw_resource_alloc_id(res);
430 if (unlikely(ret != 0)) {
431 DRM_ERROR("Failed to allocate a surface id.\n");
435 if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
441 * Encode surface define- commands.
444 submit_size = vmw_surface_define_size(srf);
445 cmd = vmw_fifo_reserve(dev_priv, submit_size);
446 if (unlikely(cmd == NULL)) {
447 DRM_ERROR("Failed reserving FIFO space for surface "
453 vmw_surface_define_encode(srf, cmd);
454 vmw_fifo_commit(dev_priv, submit_size);
456 * Surface memory usage accounting.
459 dev_priv->used_memory_size += res->backup_size;
463 vmw_resource_release_id(res);
469 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
471 * @res: Pointer to a struct vmw_res embedded in a struct
473 * @val_buf: Pointer to a struct ttm_validate_buffer containing
474 * information about the backup buffer.
475 * @bind: Boolean wether to DMA to the surface.
477 * Transfer backup data to or from a legacy surface as part of the
478 * validation process.
479 * May return other errors if the kernel is out of guest resources.
480 * The backup buffer will be fenced or idle upon successful completion,
481 * and if the surface needs persistent backup storage, the backup buffer
482 * will also be returned reserved iff @bind is true.
484 static int vmw_legacy_srf_dma(struct vmw_resource *res,
485 struct ttm_validate_buffer *val_buf,
489 struct vmw_fence_obj *fence;
490 uint32_t submit_size;
491 struct vmw_surface *srf = vmw_res_to_srf(res);
493 struct vmw_private *dev_priv = res->dev_priv;
495 BUG_ON(val_buf->bo == NULL);
497 submit_size = vmw_surface_dma_size(srf);
498 cmd = vmw_fifo_reserve(dev_priv, submit_size);
499 if (unlikely(cmd == NULL)) {
500 DRM_ERROR("Failed reserving FIFO space for surface "
504 vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
505 vmw_surface_dma_encode(srf, cmd, &ptr, bind);
507 vmw_fifo_commit(dev_priv, submit_size);
510 * Create a fence object and fence the backup buffer.
513 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
516 vmw_fence_single_bo(val_buf->bo, fence);
518 if (likely(fence != NULL))
519 vmw_fence_obj_unreference(&fence);
525 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
526 * surface validation process.
528 * @res: Pointer to a struct vmw_res embedded in a struct
530 * @val_buf: Pointer to a struct ttm_validate_buffer containing
531 * information about the backup buffer.
533 * This function will copy backup data to the surface if the
534 * backup buffer is dirty.
536 static int vmw_legacy_srf_bind(struct vmw_resource *res,
537 struct ttm_validate_buffer *val_buf)
539 if (!res->backup_dirty)
542 return vmw_legacy_srf_dma(res, val_buf, true);
/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 * surface eviction process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @readback: Whether surface contents need to be read back to the
 * backup buffer before the surface is evicted.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}
567 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
568 * resource eviction process.
570 * @res: Pointer to a struct vmw_res embedded in a struct
573 static int vmw_legacy_srf_destroy(struct vmw_resource *res)
575 struct vmw_private *dev_priv = res->dev_priv;
576 uint32_t submit_size;
579 BUG_ON(res->id == -1);
582 * Encode the dma- and surface destroy commands.
585 submit_size = vmw_surface_destroy_size();
586 cmd = vmw_fifo_reserve(dev_priv, submit_size);
587 if (unlikely(cmd == NULL)) {
588 DRM_ERROR("Failed reserving FIFO space for surface "
593 vmw_surface_destroy_encode(res->id, cmd);
594 vmw_fifo_commit(dev_priv, submit_size);
597 * Surface memory usage accounting.
600 dev_priv->used_memory_size -= res->backup_size;
603 * Release the surface ID.
606 vmw_resource_release_id(res);
613 * vmw_surface_init - initialize a struct vmw_surface
615 * @dev_priv: Pointer to a device private struct.
616 * @srf: Pointer to the struct vmw_surface to initialize.
617 * @res_free: Pointer to a resource destructor used to free
620 static int vmw_surface_init(struct vmw_private *dev_priv,
621 struct vmw_surface *srf,
622 void (*res_free) (struct vmw_resource *res))
625 struct vmw_resource *res = &srf->res;
627 BUG_ON(res_free == NULL);
628 (void) vmw_3d_resource_inc(dev_priv, false);
629 ret = vmw_resource_init(dev_priv, res, true, res_free,
630 &vmw_legacy_surface_func);
632 if (unlikely(ret != 0)) {
633 vmw_3d_resource_dec(dev_priv, false);
639 * The surface won't be visible to hardware until a
643 vmw_resource_activate(res, vmw_hw_surface_destroy);
648 * vmw_user_surface_base_to_res - TTM base object to resource converter for
649 * user visible surfaces
651 * @base: Pointer to a TTM base object
653 * Returns the struct vmw_resource embedded in a struct vmw_surface
654 * for the user-visible object identified by the TTM base object @base.
656 static struct vmw_resource *
657 vmw_user_surface_base_to_res(struct ttm_base_object *base)
659 return &(container_of(base, struct vmw_user_surface, base)->srf.res);
663 * vmw_user_surface_free - User visible surface resource destructor
665 * @res: A struct vmw_resource embedded in a struct vmw_surface.
667 static void vmw_user_surface_free(struct vmw_resource *res)
669 struct vmw_surface *srf = vmw_res_to_srf(res);
670 struct vmw_user_surface *user_srf =
671 container_of(srf, struct vmw_user_surface, srf);
672 struct vmw_private *dev_priv = srf->res.dev_priv;
673 uint32_t size = user_srf->size;
677 kfree(srf->snooper.image);
678 ttm_base_object_kfree(user_srf, base);
679 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
683 * vmw_user_surface_free - User visible surface TTM base object destructor
685 * @p_base: Pointer to a pointer to a TTM base object
686 * embedded in a struct vmw_user_surface.
688 * Drops the base object's reference on its resource, and the
689 * pointer pointed to by *p_base is set to NULL.
691 static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
693 struct ttm_base_object *base = *p_base;
694 struct vmw_user_surface *user_srf =
695 container_of(base, struct vmw_user_surface, base);
696 struct vmw_resource *res = &user_srf->srf.res;
699 vmw_resource_unreference(&res);
703 * vmw_user_surface_destroy_ioctl - Ioctl function implementing
704 * the user surface destroy functionality.
706 * @dev: Pointer to a struct drm_device.
707 * @data: Pointer to data copied from / to user-space.
708 * @file_priv: Pointer to a drm file private structure.
710 int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
711 struct drm_file *file_priv)
713 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
714 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
716 return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
720 * vmw_user_surface_define_ioctl - Ioctl function implementing
721 * the user surface define functionality.
723 * @dev: Pointer to a struct drm_device.
724 * @data: Pointer to data copied from / to user-space.
725 * @file_priv: Pointer to a drm file private structure.
727 int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
728 struct drm_file *file_priv)
730 struct vmw_private *dev_priv = vmw_priv(dev);
731 struct vmw_user_surface *user_srf;
732 struct vmw_surface *srf;
733 struct vmw_resource *res;
734 struct vmw_resource *tmp;
735 union drm_vmw_surface_create_arg *arg =
736 (union drm_vmw_surface_create_arg *)data;
737 struct drm_vmw_surface_create_req *req = &arg->req;
738 struct drm_vmw_surface_arg *rep = &arg->rep;
739 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
740 struct drm_vmw_size __user *user_sizes;
743 uint32_t cur_bo_offset;
744 struct drm_vmw_size *cur_size;
745 struct vmw_surface_offset *cur_offset;
750 struct vmw_master *vmaster = vmw_master(file_priv->master);
752 if (unlikely(vmw_user_surface_size == 0))
753 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
757 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
758 num_sizes += req->mip_levels[i];
760 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
761 DRM_VMW_MAX_MIP_LEVELS)
764 size = vmw_user_surface_size + 128 +
765 ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
766 ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
769 ret = ttm_read_lock(&vmaster->lock, true);
770 if (unlikely(ret != 0))
773 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
775 if (unlikely(ret != 0)) {
776 if (ret != -ERESTARTSYS)
777 DRM_ERROR("Out of graphics memory for surface"
782 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
783 if (unlikely(user_srf == NULL)) {
785 goto out_no_user_srf;
788 srf = &user_srf->srf;
791 srf->flags = req->flags;
792 srf->format = req->format;
793 srf->scanout = req->scanout;
795 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
796 srf->num_sizes = num_sizes;
797 user_srf->size = size;
799 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
800 if (unlikely(srf->sizes == NULL)) {
804 srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
806 if (unlikely(srf->sizes == NULL)) {
811 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
814 ret = copy_from_user(srf->sizes, user_sizes,
815 srf->num_sizes * sizeof(*srf->sizes));
816 if (unlikely(ret != 0)) {
821 srf->base_size = *srf->sizes;
822 srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
823 srf->multisample_count = 1;
826 cur_offset = srf->offsets;
827 cur_size = srf->sizes;
829 bpp = vmw_sf_bpp[srf->format].bpp;
830 stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
832 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
833 for (j = 0; j < srf->mip_levels[i]; ++j) {
835 (cur_size->width * stride_bpp + 7) >> 3;
837 cur_offset->face = i;
839 cur_offset->bo_offset = cur_bo_offset;
840 cur_bo_offset += stride * cur_size->height *
841 cur_size->depth * bpp / stride_bpp;
846 res->backup_size = cur_bo_offset;
849 srf->num_sizes == 1 &&
850 srf->sizes[0].width == 64 &&
851 srf->sizes[0].height == 64 &&
852 srf->format == SVGA3D_A8R8G8B8) {
854 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
855 /* clear the image */
856 if (srf->snooper.image) {
857 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
859 DRM_ERROR("Failed to allocate cursor_image\n");
864 srf->snooper.image = NULL;
866 srf->snooper.crtc = NULL;
868 user_srf->base.shareable = false;
869 user_srf->base.tfile = NULL;
872 * From this point, the generic resource management functions
873 * destroy the object on failure.
876 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
877 if (unlikely(ret != 0))
880 tmp = vmw_resource_reference(&srf->res);
881 ret = ttm_base_object_init(tfile, &user_srf->base,
882 req->shareable, VMW_RES_SURFACE,
883 &vmw_user_surface_base_release, NULL);
885 if (unlikely(ret != 0)) {
886 vmw_resource_unreference(&tmp);
887 vmw_resource_unreference(&res);
891 rep->sid = user_srf->base.hash.key;
892 vmw_resource_unreference(&res);
894 ttm_read_unlock(&vmaster->lock);
901 ttm_base_object_kfree(user_srf, base);
903 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
905 ttm_read_unlock(&vmaster->lock);
910 * vmw_user_surface_reference_ioctl - Ioctl function implementing
911 * the user surface reference functionality.
913 * @dev: Pointer to a struct drm_device.
914 * @data: Pointer to data copied from / to user-space.
915 * @file_priv: Pointer to a drm file private structure.
917 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
918 struct drm_file *file_priv)
920 union drm_vmw_surface_reference_arg *arg =
921 (union drm_vmw_surface_reference_arg *)data;
922 struct drm_vmw_surface_arg *req = &arg->req;
923 struct drm_vmw_surface_create_req *rep = &arg->rep;
924 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
925 struct vmw_surface *srf;
926 struct vmw_user_surface *user_srf;
927 struct drm_vmw_size __user *user_sizes;
928 struct ttm_base_object *base;
931 base = ttm_base_object_lookup(tfile, req->sid);
932 if (unlikely(base == NULL)) {
933 DRM_ERROR("Could not find surface to reference.\n");
937 if (unlikely(base->object_type != VMW_RES_SURFACE))
938 goto out_bad_resource;
940 user_srf = container_of(base, struct vmw_user_surface, base);
941 srf = &user_srf->srf;
943 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
944 if (unlikely(ret != 0)) {
945 DRM_ERROR("Could not add a reference to a surface.\n");
946 goto out_no_reference;
949 rep->flags = srf->flags;
950 rep->format = srf->format;
951 memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
952 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
956 ret = copy_to_user(user_sizes, srf->sizes,
957 srf->num_sizes * sizeof(*srf->sizes));
958 if (unlikely(ret != 0)) {
959 DRM_ERROR("copy_to_user failed %p %u\n",
960 user_sizes, srf->num_sizes);
965 ttm_base_object_unref(&base);