Lines Matching refs:srf
41 struct vmw_surface srf; member
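
The srf member above is what most of the matches below pull a container pointer out of. A minimal userspace sketch of that embedded-member pattern, assuming illustrative stand-in struct layouts rather than the driver's real definitions:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct vmw_surface { int id; };                     /* stand-in */
    struct vmw_user_surface {                           /* stand-in */
            int base;
            struct vmw_surface srf;
    };

    int main(void)
    {
            struct vmw_user_surface user_srf = { .srf = { .id = 7 } };
            struct vmw_surface *srf = &user_srf.srf;

            /* Recover the containing vmw_user_surface from its srf
             * member, as vmw_user_surface_free() does at line 1141. */
            struct vmw_user_surface *owner =
                    container_of(srf, struct vmw_user_surface, srf);
            printf("%d\n", owner->srf.id); /* prints 7 */
            return 0;
    }
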
605 static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) in vmw_surface_dma_size() argument
607 return srf->num_sizes * sizeof(struct vmw_surface_dma); in vmw_surface_dma_size()
619 static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) in vmw_surface_define_size() argument
621 return sizeof(struct vmw_surface_define) + srf->num_sizes * in vmw_surface_define_size()
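
The two size helpers above budget FIFO command space: a define command carries one SVGA3dSize per mip entry after its body, while the DMA path emits one full command per entry. A sketch of that arithmetic with stand-in struct sizes (the real SVGA3dSize, vmw_surface_define and vmw_surface_dma layouts live in the driver and SVGA headers):

    #include <stdint.h>
    #include <stdio.h>

    struct svga3d_size { uint32_t width, height, depth; }; /* stand-in */
    struct surface_define_cmd { uint8_t bytes[40]; };      /* stand-in size */
    struct surface_dma_cmd { uint8_t bytes[72]; };         /* stand-in size */

    static uint32_t define_size(uint32_t num_sizes)
    {
            /* header/body plus one per-entry size, mirroring line 621 */
            return sizeof(struct surface_define_cmd) +
                   num_sizes * sizeof(struct svga3d_size);
    }

    static uint32_t dma_size(uint32_t num_sizes)
    {
            /* one full DMA command per entry, mirroring line 607 */
            return num_sizes * sizeof(struct surface_dma_cmd);
    }

    int main(void)
    {
            uint32_t n = 6; /* e.g. a 6-mip chain on a single face */
            printf("define=%u dma=%u\n", define_size(n), dma_size(n));
            return 0;
    }
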
660 static void vmw_surface_define_encode(const struct vmw_surface *srf, in vmw_surface_define_encode() argument
670 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); in vmw_surface_define_encode()
674 cmd->body.sid = srf->res.id; in vmw_surface_define_encode()
675 cmd->body.surfaceFlags = srf->flags; in vmw_surface_define_encode()
676 cmd->body.format = cpu_to_le32(srf->format); in vmw_surface_define_encode()
678 cmd->body.face[i].numMipLevels = srf->mip_levels[i]; in vmw_surface_define_encode()
682 src_size = srf->sizes; in vmw_surface_define_encode()
684 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { in vmw_surface_define_encode()
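
The loop at line 684 walks srf->sizes and the command tail in lockstep. A stand-in model of that copy, assuming the body copies the three dimensions of each entry into the SVGA3dSize array that follows the command body in FIFO memory:

    #include <stdint.h>

    struct drm_vmw_size { uint32_t width, height, depth; };
    struct svga3d_size  { uint32_t width, height, depth; }; /* stand-in */

    static void encode_sizes(struct svga3d_size *cmd_size,
                             const struct drm_vmw_size *src_size,
                             uint32_t num_sizes)
    {
            uint32_t i;

            /* same lockstep advance as line 684 */
            for (i = 0; i < num_sizes; ++i, cmd_size++, src_size++) {
                    cmd_size->width  = src_size->width;
                    cmd_size->height = src_size->height;
                    cmd_size->depth  = src_size->depth;
            }
    }
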
701 static void vmw_surface_dma_encode(struct vmw_surface *srf, in vmw_surface_dma_encode() argument
707 uint32_t bpp = vmw_sf_bpp[srf->format].bpp; in vmw_surface_dma_encode()
708 uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp; in vmw_surface_dma_encode()
711 for (i = 0; i < srf->num_sizes; ++i) { in vmw_surface_dma_encode()
716 const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; in vmw_surface_dma_encode()
717 const struct drm_vmw_size *cur_size = &srf->sizes[i]; in vmw_surface_dma_encode()
725 body->host.sid = srf->res.id; in vmw_surface_dma_encode()
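
Each DMA entry pairs srf->offsets[i], a byte offset into the backup buffer, with srf->sizes[i], and the caller's flag selects upload versus readback. An illustrative pairing only; the field names on the box/guest-image side here are assumptions, not the SVGA ABI:

    #include <stdbool.h>
    #include <stdint.h>

    struct drm_vmw_size { uint32_t width, height, depth; };
    struct surface_offset { uint32_t bo_offset; };   /* stand-in */
    struct dma_entry {                               /* stand-in */
            uint32_t host_sid;
            uint32_t guest_offset;
            struct drm_vmw_size box;
            bool to_surface;
    };

    static void dma_encode(struct dma_entry *cmd, uint32_t sid,
                           const struct surface_offset *offsets,
                           const struct drm_vmw_size *sizes,
                           uint32_t num_sizes, bool to_surface)
    {
            uint32_t i;

            for (i = 0; i < num_sizes; ++i) {
                    cmd[i].host_sid = sid;                 /* line 725 */
                    cmd[i].guest_offset = offsets[i].bo_offset;
                    cmd[i].box = sizes[i];
                    /* true: guest-to-host upload; false: readback */
                    cmd[i].to_surface = to_surface;
            }
    }
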
755 struct vmw_surface *srf; in vmw_hw_surface_destroy() local
777 srf = container_of(res, struct vmw_surface, res); in vmw_hw_surface_destroy()
778 dev_priv->used_memory_size -= srf->backup_size; in vmw_hw_surface_destroy()
787 struct vmw_surface *srf = container_of(res, struct vmw_surface, res); in vmw_surface_res_free() local
789 if (srf->backup) in vmw_surface_res_free()
790 ttm_bo_unref(&srf->backup); in vmw_surface_res_free()
791 kfree(srf->offsets); in vmw_surface_res_free()
792 kfree(srf->sizes); in vmw_surface_res_free()
793 kfree(srf->snooper.image); in vmw_surface_res_free()
794 kfree(srf); in vmw_surface_res_free()
813 struct vmw_surface *srf) in vmw_surface_do_validate() argument
815 struct vmw_resource *res = &srf->res; in vmw_surface_do_validate()
825 if (unlikely(dev_priv->used_memory_size + srf->backup_size >= in vmw_surface_do_validate()
833 if (srf->backup) { in vmw_surface_do_validate()
835 val_buf.bo = ttm_bo_reference(srf->backup); in vmw_surface_do_validate()
843 ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, in vmw_surface_do_validate()
868 submit_size = vmw_surface_define_size(srf); in vmw_surface_do_validate()
869 if (srf->backup) in vmw_surface_do_validate()
870 submit_size += vmw_surface_dma_size(srf); in vmw_surface_do_validate()
880 vmw_surface_define_encode(srf, cmd); in vmw_surface_do_validate()
881 if (srf->backup) { in vmw_surface_do_validate()
884 cmd += vmw_surface_define_size(srf); in vmw_surface_do_validate()
885 vmw_bo_get_guest_ptr(srf->backup, &ptr); in vmw_surface_do_validate()
886 vmw_surface_dma_encode(srf, cmd, &ptr, true); in vmw_surface_do_validate()
895 if (srf->backup) { in vmw_surface_do_validate()
904 ttm_bo_unref(&srf->backup); in vmw_surface_do_validate()
911 dev_priv->used_memory_size += srf->backup_size; in vmw_surface_do_validate()
919 if (srf->backup) in vmw_surface_do_validate()
922 if (srf->backup) in vmw_surface_do_validate()
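
The validate matches above follow a fixed order: budget check, optional backup validation, define plus optional upload DMA, then accounting. A condensed model of that ordering with the driver calls reduced to comments and error unwinding elided:

    #include <stdbool.h>
    #include <stdint.h>

    struct surface { bool has_backup; uint32_t backup_size; };

    static uint32_t used_memory_size;
    static uint32_t memory_budget = 8 * 1024 * 1024; /* illustrative */

    static int validate_surface(struct surface *srf)
    {
            /* 1. Refuse if defining the surface would blow the device
             *    memory budget (line 825). */
            if (used_memory_size + srf->backup_size >= memory_budget)
                    return -1;

            /* 2. Reserve FIFO space for a define command, plus a DMA
             *    upload if evicted contents must be restored
             *    (lines 868-870). */

            /* 3. Encode the define, then the guest-to-host DMA from the
             *    backup buffer, then drop the backup that held the
             *    evicted contents (lines 880-904). */
            srf->has_backup = false;

            /* 4. Account the memory only once the commands are queued
             *    (line 911). */
            used_memory_size += srf->backup_size;
            return 0;
    }

    int main(void)
    {
            struct surface s = { true, 16384 };
            return validate_surface(&s);
    }
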
937 struct vmw_surface *srf) in vmw_surface_evict() argument
939 struct vmw_resource *res = &srf->res; in vmw_surface_evict()
954 if (!srf->backup) { in vmw_surface_evict()
955 ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size, in vmw_surface_evict()
958 NULL, &srf->backup); in vmw_surface_evict()
968 val_buf.bo = ttm_bo_reference(srf->backup); in vmw_surface_evict()
976 ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, in vmw_surface_evict()
986 submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size(); in vmw_surface_evict()
995 vmw_bo_get_guest_ptr(srf->backup, &ptr); in vmw_surface_evict()
996 vmw_surface_dma_encode(srf, cmd, &ptr, false); in vmw_surface_evict()
997 cmd += vmw_surface_dma_size(srf); in vmw_surface_evict()
1005 dev_priv->used_memory_size -= srf->backup_size; in vmw_surface_evict()
1028 if (srf->backup) in vmw_surface_evict()
1032 ttm_bo_unref(&srf->backup); in vmw_surface_evict()
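
Eviction is the mirror image: read the contents back into a backup buffer before destroying the HW surface, then release the accounting taken at validate time. A stub-level sketch under the same assumptions as the validate model above:

    #include <stdbool.h>
    #include <stdint.h>

    struct surface { bool has_backup; uint32_t backup_size; };
    static uint32_t used_memory_size;

    static int evict_surface(struct surface *srf)
    {
            /* 1. Create a backup buffer if none survives from an
             *    earlier cycle (lines 954-958). */
            if (!srf->has_backup)
                    srf->has_backup = true; /* stands in for ttm_bo_create() */

            /* 2. Reserve space for the readback DMA plus the destroy
             *    command (line 986), encode the host-to-guest DMA
             *    (to_surface == false, line 996), then the destroy. */

            /* 3. Release the device-memory accounting (line 1005). */
            used_memory_size -= srf->backup_size;
            return 0;
    }

    int main(void)
    {
            struct surface s = { false, 16384 };
            return evict_surface(&s);
    }
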
1051 struct vmw_surface *srf) in vmw_surface_validate() argument
1058 list_del_init(&srf->lru_head); in vmw_surface_validate()
1061 ret = vmw_surface_do_validate(dev_priv, srf); in vmw_surface_validate()
1086 if (unlikely(ret != 0 && srf->res.id != -1)) { in vmw_surface_validate()
1088 list_add_tail(&srf->lru_head, &dev_priv->surface_lru); in vmw_surface_validate()
1106 struct vmw_surface *srf = container_of(res, struct vmw_surface, res); in vmw_surface_remove_from_lists() local
1108 list_del_init(&srf->lru_head); in vmw_surface_remove_from_lists()
1112 struct vmw_surface *srf, in vmw_surface_init() argument
1116 struct vmw_resource *res = &srf->res; in vmw_surface_init()
1119 INIT_LIST_HEAD(&srf->lru_head); in vmw_surface_init()
1139 struct vmw_surface *srf = container_of(res, struct vmw_surface, res); in vmw_user_surface_free() local
1141 container_of(srf, struct vmw_user_surface, srf); in vmw_user_surface_free()
1142 struct vmw_private *dev_priv = srf->res.dev_priv; in vmw_user_surface_free()
1145 if (srf->backup) in vmw_user_surface_free()
1146 ttm_bo_unref(&srf->backup); in vmw_user_surface_free()
1147 kfree(srf->offsets); in vmw_user_surface_free()
1148 kfree(srf->sizes); in vmw_user_surface_free()
1149 kfree(srf->snooper.image); in vmw_user_surface_free()
1170 struct vmw_surface *srf; in vmw_resource_unreserve() local
1184 srf = container_of(res, struct vmw_surface, res); in vmw_resource_unreserve()
1185 list_del_init(&srf->lru_head); in vmw_resource_unreserve()
1186 list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru); in vmw_resource_unreserve()
1222 struct vmw_surface *srf; in vmw_user_surface_lookup_handle() local
1235 srf = &user_srf->srf; in vmw_user_surface_lookup_handle()
1236 res = &srf->res; in vmw_user_surface_lookup_handle()
1248 *out = srf; in vmw_user_surface_lookup_handle()
1262 struct vmw_resource *res = &user_srf->srf.res; in vmw_user_surface_base_release()
1282 struct vmw_surface *srf; in vmw_surface_define_ioctl() local
1338 srf = &user_srf->srf; in vmw_surface_define_ioctl()
1339 res = &srf->res; in vmw_surface_define_ioctl()
1341 srf->flags = req->flags; in vmw_surface_define_ioctl()
1342 srf->format = req->format; in vmw_surface_define_ioctl()
1343 srf->scanout = req->scanout; in vmw_surface_define_ioctl()
1344 srf->backup = NULL; in vmw_surface_define_ioctl()
1346 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); in vmw_surface_define_ioctl()
1347 srf->num_sizes = num_sizes; in vmw_surface_define_ioctl()
1350 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); in vmw_surface_define_ioctl()
1351 if (unlikely(srf->sizes == NULL)) { in vmw_surface_define_ioctl()
1355 srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), in vmw_surface_define_ioctl()
1357 if (unlikely(srf->offsets == NULL)) { in vmw_surface_define_ioctl()
1365 ret = copy_from_user(srf->sizes, user_sizes, in vmw_surface_define_ioctl()
1366 srf->num_sizes * sizeof(*srf->sizes)); in vmw_surface_define_ioctl()
1373 cur_offset = srf->offsets; in vmw_surface_define_ioctl()
1374 cur_size = srf->sizes; in vmw_surface_define_ioctl()
1376 bpp = vmw_sf_bpp[srf->format].bpp; in vmw_surface_define_ioctl()
1377 stride_bpp = vmw_sf_bpp[srf->format].s_bpp; in vmw_surface_define_ioctl()
1380 for (j = 0; j < srf->mip_levels[i]; ++j) { in vmw_surface_define_ioctl()
1393 srf->backup_size = cur_bo_offset; in vmw_surface_define_ioctl()
1395 if (srf->scanout && in vmw_surface_define_ioctl()
1396 srf->num_sizes == 1 && in vmw_surface_define_ioctl()
1397 srf->sizes[0].width == 64 && in vmw_surface_define_ioctl()
1398 srf->sizes[0].height == 64 && in vmw_surface_define_ioctl()
1399 srf->format == SVGA3D_A8R8G8B8) { in vmw_surface_define_ioctl()
1402 srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL); in vmw_surface_define_ioctl()
1403 if (!srf->snooper.image) { in vmw_surface_define_ioctl()
1409 srf->snooper.image = NULL; in vmw_surface_define_ioctl()
1411 srf->snooper.crtc = NULL; in vmw_surface_define_ioctl()
1421 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); in vmw_surface_define_ioctl()
1425 tmp = vmw_resource_reference(&srf->res); in vmw_surface_define_ioctl()
1445 kfree(srf->offsets); in vmw_surface_define_ioctl()
1447 kfree(srf->sizes); in vmw_surface_define_ioctl()
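
The loop around line 1380 accumulates cur_bo_offset across every face's mip chain to produce srf->backup_size. A rough model of that accumulation: the driver reads each mip's dimensions from the user-supplied sizes array and tracks a separate stride bpp for block formats, whereas this sketch simply generates a conventional halving chain and uses one bpp:

    #include <stdint.h>
    #include <stdio.h>

    struct size3 { uint32_t w, h, d; };

    static uint32_t backup_size(const struct size3 *base,
                                uint32_t mip_levels, uint32_t bpp)
    {
            uint32_t off = 0, i;
            struct size3 s = *base;

            for (i = 0; i < mip_levels; ++i) {
                    off += (bpp / 8) * s.w * s.h * s.d; /* this mip's bytes */
                    s.w = s.w > 1 ? s.w / 2 : 1;        /* next mip halves */
                    s.h = s.h > 1 ? s.h / 2 : 1;
                    s.d = s.d > 1 ? s.d / 2 : 1;
            }
            return off;
    }

    int main(void)
    {
            struct size3 cursor = { 64, 64, 1 };

            /* single-mip 64x64 A8R8G8B8: 64 * 64 * 4 = 16384 bytes,
             * matching the snooper image allocated at line 1402 */
            printf("%u\n", backup_size(&cursor, 1, 32));
            return 0;
    }
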
1465 struct vmw_surface *srf; in vmw_surface_reference_ioctl() local
1481 srf = &user_srf->srf; in vmw_surface_reference_ioctl()
1489 rep->flags = srf->flags; in vmw_surface_reference_ioctl()
1490 rep->format = srf->format; in vmw_surface_reference_ioctl()
1491 memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); in vmw_surface_reference_ioctl()
1496 ret = copy_to_user(user_sizes, srf->sizes, in vmw_surface_reference_ioctl()
1497 srf->num_sizes * sizeof(*srf->sizes)); in vmw_surface_reference_ioctl()
1500 user_sizes, srf->num_sizes); in vmw_surface_reference_ioctl()
1527 *id = user_srf->srf.res.id; in vmw_surface_check()