* @index_buffer: Index buffer binding.
* @per_shader: Per shader-type bindings.
* @ua_views: UAV bindings.
+ * @so_state: StreamOutput bindings.
* @dirty: Bitmap tracking per binding-type changes that have not yet
* been emitted to the device.
* @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
struct vmw_ctx_bindinfo_ib index_buffer;
struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE];
struct vmw_ctx_bindinfo_uav ua_views[VMW_MAX_UAV_BIND_TYPE];
+ struct vmw_ctx_bindinfo_so so_state;
unsigned long dirty;
DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_binding_build_asserts(void) __attribute__ ((unused));
static const size_t vmw_binding_cs_uav_offsets[] = {
offsetof(struct vmw_ctx_binding_state, ua_views[1].views),
};
+static const size_t vmw_binding_so_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, so_state),
+};
static const struct vmw_binding_info vmw_binding_infos[] = {
[vmw_ctx_binding_shader] = {
.size = sizeof(struct vmw_ctx_bindinfo_view),
.offsets = vmw_binding_cs_uav_offsets,
.scrub_func = vmw_binding_scrub_cs_uav},
+ [vmw_ctx_binding_so] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_so),
+ .offsets = vmw_binding_so_offsets,
+ .scrub_func = vmw_binding_scrub_so},
};
/**
return 0;
}
+/**
+ * vmw_binding_scrub_so - Scrub a streamoutput binding from context.
+ * @bi: Single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_so *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetStreamOutput body;
+ } *cmd;
+
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
/**
* vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
* memory accounting.
};
/* Review this function as new bindings are added. */
- BUILD_BUG_ON(vmw_ctx_binding_max != 13);
+ BUILD_BUG_ON(vmw_ctx_binding_max != 14);
return is_binding_dirtying[binding_type];
}
return ret;
}
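+/**
+ * vmw_cmd_dx_define_streamoutput - Validate the
+ * SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB command.
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */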
+static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineStreamOutputWithMob body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
+ ret = vmw_cotable_notify(res, cmd->body.soid);
+ if (ret)
+ return ret;
+
+ return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
+ cmd->body.soid,
+ &sw_context->staged_cmd_res);
+}
+
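+/**
+ * vmw_cmd_dx_destroy_streamoutput - Validate the
+ * SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT command.
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */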
+static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDestroyStreamOutput body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * When the device does not support SM5, the streamoutput-with-mob
+ * commands are not available to user-space. Simply return in this case.
+ */
+ if (!has_sm5_context(dev_priv))
+ return 0;
+
+ /*
+ * On an SM5-capable device a failed lookup means that user-space
+ * probably used the old streamoutput define command. Return without
+ * an error.
+ */
+ res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
+ cmd->body.soid);
+ if (IS_ERR(res))
+ return 0;
+
+ return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
+ &sw_context->staged_cmd_res);
+}
+
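+/**
+ * vmw_cmd_dx_bind_streamoutput - Validate the
+ * SVGA_3D_CMD_DX_BIND_STREAMOUTPUT command.
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */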
+static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindStreamOutput body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
+ cmd->body.soid);
+ if (IS_ERR(res)) {
+ DRM_ERROR("Cound not find streamoutput to bind.\n");
+ return PTR_ERR(res);
+ }
+
+ vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
+
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
+ if (ret) {
+ DRM_ERROR("Error creating resource validation node.\n");
+ return ret;
+ }
+
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
+ &cmd->body.mobid,
+ cmd->body.offsetInBytes);
+}
+
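+/**
+ * vmw_cmd_dx_set_streamoutput - Validate the
+ * SVGA_3D_CMD_DX_SET_STREAMOUTPUT command.
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */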
+static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct vmw_ctx_bindinfo_so binding;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetStreamOutput body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ if (cmd->body.soid == SVGA3D_INVALID_ID)
+ return 0;
+
+ /*
+ * When the device does not support SM5, the streamoutput-with-mob
+ * commands are not available to user-space. Simply return in this case.
+ */
+ if (!has_sm5_context(dev_priv))
+ return 0;
+
+ /*
+ * On an SM5-capable device a failed lookup means that user-space
+ * probably used the old streamoutput define command. Return without
+ * an error.
+ */
+ res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
+ cmd->body.soid);
+ if (IS_ERR(res))
+ return 0;
+
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
+ if (ret) {
+ DRM_ERROR("Error creating resource validation node.\n");
+ return ret;
+ }
+
+ binding.bi.ctx = ctx_node->ctx;
+ binding.bi.res = res;
+ binding.bi.bt = vmw_ctx_binding_so;
+ binding.slot = 0; /* Only one SO set to context at a time. */
+
+ vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
+ binding.slot);
+
+ return ret;
+}
+
static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
&vmw_cmd_dx_so_define, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
- &vmw_cmd_dx_cid_check, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
- true, false, true),
+ &vmw_cmd_dx_destroy_streamoutput, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
+ &vmw_cmd_dx_set_streamoutput, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
&vmw_cmd_dx_set_so_targets, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
&vmw_cmd_sm5_view_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
+ &vmw_cmd_dx_define_streamoutput, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
+ &vmw_cmd_dx_bind_streamoutput, true, false, true),
};
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2018-2019 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
+
+/**
+ * struct vmw_dx_streamoutput - Streamoutput resource metadata.
+ * @res: Base resource struct.
+ * @ctx: Non-refcounted context to which @res belongs.
+ * @cotable: Refcounted cotable holding this Streamoutput.
+ * @cotable_head: List head for cotable-so_res list.
+ * @id: User-space provided identifier.
+ * @size: User-space provided mob size.
+ * @committed: Whether streamoutput is actually created or pending creation.
+ */
+struct vmw_dx_streamoutput {
+ struct vmw_resource res;
+ struct vmw_resource *ctx;
+ struct vmw_resource *cotable;
+ struct list_head cotable_head;
+ u32 id;
+ u32 size;
+ bool committed;
+};
+
+static int vmw_dx_streamoutput_create(struct vmw_resource *res);
+static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
+ struct ttm_validate_buffer *val_buf);
+static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state);
+
+static size_t vmw_streamoutput_size;
+
+static const struct vmw_res_func vmw_dx_streamoutput_func = {
+ .res_type = vmw_res_streamoutput,
+ .needs_backup = true,
+ .may_evict = false,
+ .type_name = "DX streamoutput",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_dx_streamoutput_create,
+ .destroy = NULL, /* Command buffer managed resource. */
+ .bind = vmw_dx_streamoutput_bind,
+ .unbind = vmw_dx_streamoutput_unbind,
+ .commit_notify = vmw_dx_streamoutput_commit_notify,
+};
+
+static inline struct vmw_dx_streamoutput *
+vmw_res_to_dx_streamoutput(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_dx_streamoutput, res);
+}
+
+/**
+ * vmw_dx_streamoutput_unscrub - Reattach the MOB to streamoutput.
+ * @res: The streamoutput resource.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
+{
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindStreamOutput body;
+ } *cmd;
+
+ if (!list_empty(&so->cotable_head) || !so->committed)
+ return 0;
+
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.soid = so->id;
+ cmd->body.mobid = res->backup->base.mem.start;
+ cmd->body.offsetInBytes = res->backup_offset;
+ cmd->body.sizeInBytes = so->size;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ vmw_cotable_add_resource(so->cotable, &so->cotable_head);
+
+ return 0;
+}
+
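+/**
+ * vmw_dx_streamoutput_create - Create callback for the streamoutput resource.
+ * @res: The streamoutput resource.
+ *
+ * Rebinds the MOB if one is already attached and exposes the user-space id
+ * as the device id of the resource.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */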
+static int vmw_dx_streamoutput_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+ int ret = 0;
+
+ WARN_ON_ONCE(!so->committed);
+
+ if (vmw_resource_mob_attached(res)) {
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_streamoutput_unscrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
+
+ res->id = so->id;
+
+ return ret;
+}
+
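+/**
+ * vmw_dx_streamoutput_bind - Bind callback: attach the backup MOB to the
+ * streamoutput.
+ * @res: The streamoutput resource.
+ * @val_buf: Validation buffer holding the backup MOB.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */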
+static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ int ret;
+
+ if (WARN_ON(bo->mem.mem_type != VMW_PL_MOB))
+ return -EINVAL;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_streamoutput_unscrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ return ret;
+}
+
+/**
+ * vmw_dx_streamoutput_scrub - Unbind the MOB from streamoutput.
+ * @res: The streamoutput resource.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindStreamOutput body;
+ } *cmd;
+
+ if (list_empty(&so->cotable_head))
+ return 0;
+
+ WARN_ON_ONCE(!so->committed);
+
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.soid = res->id;
+ cmd->body.mobid = SVGA3D_INVALID_ID;
+ cmd->body.offsetInBytes = 0;
+ cmd->body.sizeInBytes = so->size;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ res->id = -1;
+ list_del_init(&so->cotable_head);
+
+ return 0;
+}
+
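+/**
+ * vmw_dx_streamoutput_unbind - Unbind callback: detach the backup MOB from
+ * the streamoutput.
+ * @res: The streamoutput resource.
+ * @readback: Unused. Part of the resource func interface.
+ * @val_buf: Validation buffer holding the backup MOB.
+ *
+ * Scrubs the binding and fences the backup buffer so it is not released
+ * before the device is done with it.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */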
+static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_fence_obj *fence;
+ int ret;
+
+ if (WARN_ON(res->backup->base.mem.mem_type != VMW_PL_MOB))
+ return -EINVAL;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_streamoutput_scrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ if (ret)
+ return ret;
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ vmw_bo_fence_single(val_buf->bo, fence);
+
+ if (fence != NULL)
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
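+/**
+ * vmw_dx_streamoutput_commit_notify - Commit callback for the staged
+ * streamoutput command buffer resource.
+ * @res: The streamoutput resource.
+ * @state: Whether the staged resource was added or removed.
+ *
+ * Adds the resource to or removes it from the streamoutput cotable list and
+ * updates its committed state and device id accordingly.
+ */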
+static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+
+ if (state == VMW_CMDBUF_RES_ADD) {
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_cotable_add_resource(so->cotable, &so->cotable_head);
+ so->committed = true;
+ res->id = so->id;
+ mutex_unlock(&dev_priv->binding_mutex);
+ } else {
+ mutex_lock(&dev_priv->binding_mutex);
+ list_del_init(&so->cotable_head);
+ so->committed = false;
+ res->id = -1;
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
+}
+
+/**
+ * vmw_dx_streamoutput_lookup - Do a streamoutput resource lookup by user key.
+ * @man: Command buffer managed resource manager for current context.
+ * @user_key: User-space identifier for lookup.
+ *
+ * Return: Valid refcounted vmw_resource on success, error pointer on failure.
+ */
+struct vmw_resource *
+vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key)
+{
+ return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_streamoutput,
+ user_key);
+}
+
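+/**
+ * vmw_dx_streamoutput_res_free - Free the streamoutput resource.
+ * @res: The streamoutput resource.
+ *
+ * Releases the cotable reference and the accounted kernel memory.
+ */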
+static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+
+ vmw_resource_unreference(&so->cotable);
+ kfree(so);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size);
+}
+
+static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
+{
+ /* Destroyed by user-space cmd buf or as part of context takedown. */
+ res->id = -1;
+}
+
+/**
+ * vmw_dx_streamoutput_add - Add a streamoutput as a cmd buf managed resource.
+ * @man: Command buffer managed resource manager for current context.
+ * @ctx: Pointer to context resource.
+ * @user_key: The identifier for this streamoutput.
+ * @list: The list of staged command buffer managed resources.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx, u32 user_key,
+ struct list_head *list)
+{
+ struct vmw_dx_streamoutput *so;
+ struct vmw_resource *res;
+ struct vmw_private *dev_priv = ctx->dev_priv;
+ struct ttm_operation_ctx ttm_opt_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
+ int ret;
+
+ if (!vmw_streamoutput_size)
+ vmw_streamoutput_size = ttm_round_pot(sizeof(*so));
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_streamoutput_size, &ttm_opt_ctx);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for streamout.\n");
+ return ret;
+ }
+
+ so = kmalloc(sizeof(*so), GFP_KERNEL);
+ if (!so) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_streamoutput_size);
+ return -ENOMEM;
+ }
+
+ res = &so->res;
+ so->ctx = ctx;
+ so->cotable = vmw_resource_reference
+ (vmw_context_cotable(ctx, SVGA_COTABLE_STREAMOUTPUT));
+ so->id = user_key;
+ so->committed = false;
+ INIT_LIST_HEAD(&so->cotable_head);
+ ret = vmw_resource_init(dev_priv, res, true,
+ vmw_dx_streamoutput_res_free,
+ &vmw_dx_streamoutput_func);
+ if (ret)
+ goto out_resource_init;
+
+ ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_streamoutput, user_key,
+ res, list);
+ if (ret)
+ goto out_resource_init;
+
+ res->id = so->id;
+ res->hw_destroy = vmw_dx_streamoutput_hw_destroy;
+
+out_resource_init:
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+/**
+ * vmw_dx_streamoutput_set_size - Set the streamoutput mob size in res struct.
+ * @res: The streamoutput resource for which to set the size.
+ * @size: The size provided by user-space to set.
+ */
+void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size)
+{
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+
+ so->size = size;
+}
+
+/**
+ * vmw_dx_streamoutput_remove - Stage streamoutput for removal.
+ * @man: Command buffer managed resource manager for current context.
+ * @user_key: The identifier for this streamoutput.
+ * @list: The list of staged command buffer managed resources.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key,
+ struct list_head *list)
+{
+ struct vmw_resource *r;
+
+ return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_streamoutput,
+ user_key, list, &r);
+}
+
+/**
+ * vmw_dx_streamoutput_cotable_list_scrub - cotable unbind_func callback.
+ * @dev_priv: Device private.
+ * @list: The list of cotable resources.
+ * @readback: Whether the call was part of a readback unbind.
+ */
+void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback)
+{
+ struct vmw_dx_streamoutput *entry, *next;
+
+ lockdep_assert_held_once(&dev_priv->binding_mutex);
+
+ list_for_each_entry_safe(entry, next, list, cotable_head) {
+ WARN_ON(vmw_dx_streamoutput_scrub(&entry->res));
+ if (!readback)
+ entry->committed = false;
+ }
+}